Columns: python_code (string, lengths 0 to 1.8M), repo_name (string, 7 distinct values), file_path (string, lengths 5 to 99)
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2018, Intel Corporation. */ #include <linux/vmalloc.h> #include "ice_common.h" /** * ice_aq_read_nvm * @hw: pointer to the HW struct * @module_typeid: module pointer location in words from the NVM beginning * @offset: byte offset from the module beginning * @length: length of the section to be read (in bytes from the offset) * @data: command buffer (size [bytes] = length) * @last_command: tells if this is the last command in a series * @read_shadow_ram: tell if this is a shadow RAM read * @cd: pointer to command details structure or NULL * * Read the NVM using the admin queue commands (0x0701) */ static int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, bool read_shadow_ram, struct ice_sq_cd *cd) { struct ice_aq_desc desc; struct ice_aqc_nvm *cmd; cmd = &desc.params.nvm; if (offset > ICE_AQC_NVM_MAX_OFFSET) return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read); if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT) cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY; /* If this is the last command in a series, set the proper flag. */ if (last_command) cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD; cmd->module_typeid = cpu_to_le16(module_typeid); cmd->offset_low = cpu_to_le16(offset & 0xFFFF); cmd->offset_high = (offset >> 16) & 0xFF; cmd->length = cpu_to_le16(length); return ice_aq_send_cmd(hw, &desc, data, length, cd); } /** * ice_read_flat_nvm - Read portion of NVM by flat offset * @hw: pointer to the HW struct * @offset: offset from beginning of NVM * @length: (in) number of bytes to read; (out) number of bytes actually read * @data: buffer to return data in (sized to fit the specified length) * @read_shadow_ram: if true, read from shadow RAM instead of NVM * * Reads a portion of the NVM, as a flat memory space. This function correctly * breaks read requests across Shadow RAM sectors and ensures that no single * read request exceeds the maximum 4KB read for a single AdminQ command. * * Returns a status code on failure. Note that the data pointer may be * partially updated if some reads succeed before a failure. */ int ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, bool read_shadow_ram) { u32 inlen = *length; u32 bytes_read = 0; bool last_cmd; int status; *length = 0; /* Verify the length of the read if this is for the Shadow RAM */ if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) { ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n"); return -EINVAL; } do { u32 read_size, sector_offset; /* ice_aq_read_nvm cannot read more than 4KB at a time. * Additionally, a read from the Shadow RAM may not cross over * a sector boundary. Conveniently, the sector size is also * 4KB. 
*/ sector_offset = offset % ICE_AQ_MAX_BUF_LEN; read_size = min_t(u32, ICE_AQ_MAX_BUF_LEN - sector_offset, inlen - bytes_read); last_cmd = !(bytes_read + read_size < inlen); status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, offset, read_size, data + bytes_read, last_cmd, read_shadow_ram, NULL); if (status) break; bytes_read += read_size; offset += read_size; } while (!last_cmd); *length = bytes_read; return status; } /** * ice_aq_update_nvm * @hw: pointer to the HW struct * @module_typeid: module pointer location in words from the NVM beginning * @offset: byte offset from the module beginning * @length: length of the section to be written (in bytes from the offset) * @data: command buffer (size [bytes] = length) * @last_command: tells if this is the last command in a series * @command_flags: command parameters * @cd: pointer to command details structure or NULL * * Update the NVM using the admin queue commands (0x0703) */ int ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, u8 command_flags, struct ice_sq_cd *cd) { struct ice_aq_desc desc; struct ice_aqc_nvm *cmd; cmd = &desc.params.nvm; /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write); cmd->cmd_flags |= command_flags; /* If this is the last command in a series, set the proper flag. */ if (last_command) cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD; cmd->module_typeid = cpu_to_le16(module_typeid); cmd->offset_low = cpu_to_le16(offset & 0xFFFF); cmd->offset_high = (offset >> 16) & 0xFF; cmd->length = cpu_to_le16(length); desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); return ice_aq_send_cmd(hw, &desc, data, length, cd); } /** * ice_aq_erase_nvm * @hw: pointer to the HW struct * @module_typeid: module pointer location in words from the NVM beginning * @cd: pointer to command details structure or NULL * * Erase the NVM sector using the admin queue commands (0x0702) */ int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) { struct ice_aq_desc desc; struct ice_aqc_nvm *cmd; cmd = &desc.params.nvm; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase); cmd->module_typeid = cpu_to_le16(module_typeid); cmd->length = cpu_to_le16(ICE_AQC_NVM_ERASE_LEN); cmd->offset_low = 0; cmd->offset_high = 0; return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_read_sr_word_aq - Reads Shadow RAM via AQ * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm. */ static int ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) { u32 bytes = sizeof(u16); __le16 data_local; int status; /* Note that ice_read_flat_nvm takes into account the 4Kb AdminQ and * Shadow RAM sector restrictions necessary when reading from the NVM. */ status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes, (__force u8 *)&data_local, true); if (status) return status; *data = le16_to_cpu(data_local); return 0; } /** * ice_acquire_nvm - Generic request for acquiring the NVM ownership * @hw: pointer to the HW structure * @access: NVM access type (read or write) * * This function will request NVM ownership. 
*/ int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) { if (hw->flash.blank_nvm_mode) return 0; return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT); } /** * ice_release_nvm - Generic request for releasing the NVM ownership * @hw: pointer to the HW structure * * This function will release NVM ownership. */ void ice_release_nvm(struct ice_hw *hw) { if (hw->flash.blank_nvm_mode) return; ice_release_res(hw, ICE_NVM_RES_ID); } /** * ice_get_flash_bank_offset - Get offset into requested flash bank * @hw: pointer to the HW structure * @bank: whether to read from the active or inactive flash bank * @module: the module to read from * * Based on the module, lookup the module offset from the beginning of the * flash. * * Returns the flash offset. Note that a value of zero is invalid and must be * treated as an error. */ static u32 ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select bank, u16 module) { struct ice_bank_info *banks = &hw->flash.banks; enum ice_flash_bank active_bank; bool second_bank_active; u32 offset, size; switch (module) { case ICE_SR_1ST_NVM_BANK_PTR: offset = banks->nvm_ptr; size = banks->nvm_size; active_bank = banks->nvm_bank; break; case ICE_SR_1ST_OROM_BANK_PTR: offset = banks->orom_ptr; size = banks->orom_size; active_bank = banks->orom_bank; break; case ICE_SR_NETLIST_BANK_PTR: offset = banks->netlist_ptr; size = banks->netlist_size; active_bank = banks->netlist_bank; break; default: ice_debug(hw, ICE_DBG_NVM, "Unexpected value for flash module: 0x%04x\n", module); return 0; } switch (active_bank) { case ICE_1ST_FLASH_BANK: second_bank_active = false; break; case ICE_2ND_FLASH_BANK: second_bank_active = true; break; default: ice_debug(hw, ICE_DBG_NVM, "Unexpected value for active flash bank: %u\n", active_bank); return 0; } /* The second flash bank is stored immediately following the first * bank. Based on whether the 1st or 2nd bank is active, and whether * we want the active or inactive bank, calculate the desired offset. */ switch (bank) { case ICE_ACTIVE_FLASH_BANK: return offset + (second_bank_active ? size : 0); case ICE_INACTIVE_FLASH_BANK: return offset + (second_bank_active ? 0 : size); } ice_debug(hw, ICE_DBG_NVM, "Unexpected value for flash bank selection: %u\n", bank); return 0; } /** * ice_read_flash_module - Read a word from one of the main NVM modules * @hw: pointer to the HW structure * @bank: which bank of the module to read * @module: the module to read * @offset: the offset into the module in bytes * @data: storage for the word read from the flash * @length: bytes of data to read * * Read data from the specified flash module. The bank parameter indicates * whether or not to read from the active bank or the inactive bank of that * module. * * The word will be read using flat NVM access, and relies on the * hw->flash.banks data being setup by ice_determine_active_flash_banks() * during initialization. 
*/ static int ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module, u32 offset, u8 *data, u32 length) { int status; u32 start; start = ice_get_flash_bank_offset(hw, bank, module); if (!start) { ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n", module); return -EINVAL; } status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) return status; status = ice_read_flat_nvm(hw, start + offset, &length, data, false); ice_release_nvm(hw); return status; } /** * ice_read_nvm_module - Read from the active main NVM module * @hw: pointer to the HW structure * @bank: whether to read from active or inactive NVM module * @offset: offset into the NVM module to read, in words * @data: storage for returned word value * * Read the specified word from the active NVM module. This includes the CSS * header at the start of the NVM module. */ static int ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { __le16 data_local; int status; status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16), (__force u8 *)&data_local, sizeof(u16)); if (!status) *data = le16_to_cpu(data_local); return status; } /** * ice_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank * @hw: pointer to the HW structure * @bank: whether to read from the active or inactive NVM module * @offset: offset into the Shadow RAM copy to read, in words * @data: storage for returned word value * * Read the specified word from the copy of the Shadow RAM found in the * specified NVM module. */ static int ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data); } /** * ice_read_netlist_module - Read data from the netlist module area * @hw: pointer to the HW structure * @bank: whether to read from the active or inactive module * @offset: offset into the netlist to read from * @data: storage for returned word value * * Read a word from the specified netlist bank. */ static int ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { __le16 data_local; int status; status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16), (__force u8 *)&data_local, sizeof(u16)); if (!status) *data = le16_to_cpu(data_local); return status; } /** * ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq. */ int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) { int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (!status) { status = ice_read_sr_word_aq(hw, offset, data); ice_release_nvm(hw); } return status; } /** * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA * @hw: pointer to hardware structure * @module_tlv: pointer to module TLV to return * @module_tlv_len: pointer to module TLV length to return * @module_type: module type requested * * Finds the requested sub module TLV type from the Preserved Field * Area (PFA) and returns the TLV pointer and length. The caller can * use these to read the variable length TLV value. 
*/ int ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type) { u16 pfa_len, pfa_ptr; u16 next_tlv; int status; status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); if (status) { ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n"); return status; } status = ice_read_sr_word(hw, pfa_ptr, &pfa_len); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n"); return status; } /* Starting with first TLV after PFA length, iterate through the list * of TLVs to find the requested one. */ next_tlv = pfa_ptr + 1; while (next_tlv < pfa_ptr + pfa_len) { u16 tlv_sub_module_type; u16 tlv_len; /* Read TLV type */ status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n"); break; } /* Read TLV length */ status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n"); break; } if (tlv_sub_module_type == module_type) { if (tlv_len) { *module_tlv = next_tlv; *module_tlv_len = tlv_len; return 0; } return -EINVAL; } /* Check next TLV, i.e. current TLV pointer + length + 2 words * (for current TLV's type and length) */ next_tlv = next_tlv + tlv_len + 2; } /* Module does not exist */ return -ENOENT; } /** * ice_read_pba_string - Reads part number string from NVM * @hw: pointer to hardware structure * @pba_num: stores the part number string from the NVM * @pba_num_size: part number string buffer length * * Reads the part number string from the NVM. */ int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) { u16 pba_tlv, pba_tlv_len; u16 pba_word, pba_size; int status; u16 i; status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len, ICE_SR_PBA_BLOCK_PTR); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n"); return status; } /* pba_size is the next word */ status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n"); return status; } if (pba_tlv_len < pba_size) { ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n"); return -EINVAL; } /* Subtract one to get PBA word count (PBA Size word is included in * total size) */ pba_size--; if (pba_num_size < (((u32)pba_size * 2) + 1)) { ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n"); return -EINVAL; } for (i = 0; i < pba_size; i++) { status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i); return status; } pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; pba_num[(i * 2) + 1] = pba_word & 0xFF; } pba_num[(pba_size * 2)] = '\0'; return status; } /** * ice_get_nvm_ver_info - Read NVM version information * @hw: pointer to the HW struct * @bank: whether to read from the active or inactive flash bank * @nvm: pointer to NVM info structure * * Read the NVM EETRACK ID and map version of the main NVM image bank, filling * in the NVM info structure. 
*/ static int ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm) { u16 eetrack_lo, eetrack_hi, ver; int status; status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read DEV starter version.\n"); return status; } nvm->major = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT; nvm->minor = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT; status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_LO, &eetrack_lo); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read EETRACK lo.\n"); return status; } status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_HI, &eetrack_hi); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read EETRACK hi.\n"); return status; } nvm->eetrack = (eetrack_hi << 16) | eetrack_lo; return 0; } /** * ice_get_inactive_nvm_ver - Read Option ROM version from the inactive bank * @hw: pointer to the HW structure * @nvm: storage for Option ROM version information * * Reads the NVM EETRACK ID, Map version, and security revision of the * inactive NVM bank. Used to access version data for a pending update that * has not yet been activated. */ int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm) { return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm); } /** * ice_get_orom_civd_data - Get the combo version information from Option ROM * @hw: pointer to the HW struct * @bank: whether to read from the active or inactive flash module * @civd: storage for the Option ROM CIVD data. * * Searches through the Option ROM flash contents to locate the CIVD data for * the image. */ static int ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_civd_info *civd) { u8 *orom_data; int status; u32 offset; /* The CIVD section is located in the Option ROM aligned to 512 bytes. * The first 4 bytes must contain the ASCII characters "$CIV". * A simple modulo 256 sum of all of the bytes of the structure must * equal 0. * * The exact location is unknown and varies between images but is * usually somewhere in the middle of the bank. We need to scan the * Option ROM bank to locate it. * * It's significantly faster to read the entire Option ROM up front * using the maximum page size, than to read each possible location * with a separate firmware command. 
*/ orom_data = vzalloc(hw->flash.banks.orom_size); if (!orom_data) return -ENOMEM; status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0, orom_data, hw->flash.banks.orom_size); if (status) { vfree(orom_data); ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n"); return status; } /* Scan the memory buffer to locate the CIVD data section */ for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) { struct ice_orom_civd_info *tmp; u8 sum = 0, i; tmp = (struct ice_orom_civd_info *)&orom_data[offset]; /* Skip forward until we find a matching signature */ if (memcmp("$CIV", tmp->signature, sizeof(tmp->signature)) != 0) continue; ice_debug(hw, ICE_DBG_NVM, "Found CIVD section at offset %u\n", offset); /* Verify that the simple checksum is zero */ for (i = 0; i < sizeof(*tmp); i++) sum += ((u8 *)tmp)[i]; if (sum) { ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n", sum); goto err_invalid_checksum; } *civd = *tmp; vfree(orom_data); return 0; } ice_debug(hw, ICE_DBG_NVM, "Unable to locate CIVD data within the Option ROM\n"); err_invalid_checksum: vfree(orom_data); return -EIO; } /** * ice_get_orom_ver_info - Read Option ROM version information * @hw: pointer to the HW struct * @bank: whether to read from the active or inactive flash module * @orom: pointer to Option ROM info structure * * Read Option ROM version and security revision from the Option ROM flash * section. */ static int ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom) { struct ice_orom_civd_info civd; u32 combo_ver; int status; status = ice_get_orom_civd_data(hw, bank, &civd); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to locate valid Option ROM CIVD data\n"); return status; } combo_ver = le32_to_cpu(civd.combo_ver); orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >> ICE_OROM_VER_SHIFT); orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK); orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >> ICE_OROM_VER_BUILD_SHIFT); return 0; } /** * ice_get_inactive_orom_ver - Read Option ROM version from the inactive bank * @hw: pointer to the HW structure * @orom: storage for Option ROM version information * * Reads the Option ROM version and security revision data for the inactive * section of flash. Used to access version data for a pending update that has * not yet been activated. */ int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom) { return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom); } /** * ice_get_netlist_info * @hw: pointer to the HW struct * @bank: whether to read from the active or inactive flash bank * @netlist: pointer to netlist version info structure * * Get the netlist version information from the requested bank. Reads the Link * Topology section to find the Netlist ID block and extract the relevant * information into the netlist version structure. 
*/ static int ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_netlist_info *netlist) { u16 module_id, length, node_count, i; u16 *id_blk; int status; status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id); if (status) return status; if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) { ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n", ICE_NETLIST_LINK_TOPO_MOD_ID, module_id); return -EIO; } status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length); if (status) return status; /* sanity check that we have at least enough words to store the netlist ID block */ if (length < ICE_NETLIST_ID_BLK_SIZE) { ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n", ICE_NETLIST_ID_BLK_SIZE, length); return -EIO; } status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count); if (status) return status; node_count &= ICE_LINK_TOPO_NODE_COUNT_M; id_blk = kcalloc(ICE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL); if (!id_blk) return -ENOMEM; /* Read out the entire Netlist ID Block at once. */ status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, ICE_NETLIST_ID_BLK_OFFSET(node_count) * sizeof(u16), (u8 *)id_blk, ICE_NETLIST_ID_BLK_SIZE * sizeof(u16)); if (status) goto exit_error; for (i = 0; i < ICE_NETLIST_ID_BLK_SIZE; i++) id_blk[i] = le16_to_cpu(((__force __le16 *)id_blk)[i]); netlist->major = id_blk[ICE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 | id_blk[ICE_NETLIST_ID_BLK_MAJOR_VER_LOW]; netlist->minor = id_blk[ICE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 | id_blk[ICE_NETLIST_ID_BLK_MINOR_VER_LOW]; netlist->type = id_blk[ICE_NETLIST_ID_BLK_TYPE_HIGH] << 16 | id_blk[ICE_NETLIST_ID_BLK_TYPE_LOW]; netlist->rev = id_blk[ICE_NETLIST_ID_BLK_REV_HIGH] << 16 | id_blk[ICE_NETLIST_ID_BLK_REV_LOW]; netlist->cust_ver = id_blk[ICE_NETLIST_ID_BLK_CUST_VER]; /* Read the left most 4 bytes of SHA */ netlist->hash = id_blk[ICE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 | id_blk[ICE_NETLIST_ID_BLK_SHA_HASH_WORD(14)]; exit_error: kfree(id_blk); return status; } /** * ice_get_inactive_netlist_ver * @hw: pointer to the HW struct * @netlist: pointer to netlist version info structure * * Read the netlist version data from the inactive netlist bank. Used to * extract version data of a pending flash update in order to display the * version data. */ int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist) { return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist); } /** * ice_discover_flash_size - Discover the available flash size. * @hw: pointer to the HW struct * * The device flash could be up to 16MB in size. However, it is possible that * the actual size is smaller. Use bisection to determine the accessible size * of flash memory. 
*/ static int ice_discover_flash_size(struct ice_hw *hw) { u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1; int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) return status; while ((max_size - min_size) > 1) { u32 offset = (max_size + min_size) / 2; u32 len = 1; u8 data; status = ice_read_flat_nvm(hw, offset, &len, &data, false); if (status == -EIO && hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) { ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n", __func__, offset); status = 0; max_size = offset; } else if (!status) { ice_debug(hw, ICE_DBG_NVM, "%s: New lower bound of %u bytes\n", __func__, offset); min_size = offset; } else { /* an unexpected error occurred */ goto err_read_flat_nvm; } } ice_debug(hw, ICE_DBG_NVM, "Predicted flash size is %u bytes\n", max_size); hw->flash.flash_size = max_size; err_read_flat_nvm: ice_release_nvm(hw); return status; } /** * ice_read_sr_pointer - Read the value of a Shadow RAM pointer word * @hw: pointer to the HW structure * @offset: the word offset of the Shadow RAM word to read * @pointer: pointer value read from Shadow RAM * * Read the given Shadow RAM word, and convert it to a pointer value specified * in bytes. This function assumes the specified offset is a valid pointer * word. * * Each pointer word specifies whether it is stored in word size or 4KB * sector size by using the highest bit. The reported pointer value will be in * bytes, intended for flat NVM reads. */ static int ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer) { int status; u16 value; status = ice_read_sr_word(hw, offset, &value); if (status) return status; /* Determine if the pointer is in 4KB or word units */ if (value & ICE_SR_NVM_PTR_4KB_UNITS) *pointer = (value & ~ICE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024; else *pointer = value * 2; return 0; } /** * ice_read_sr_area_size - Read an area size from a Shadow RAM word * @hw: pointer to the HW structure * @offset: the word offset of the Shadow RAM to read * @size: size value read from the Shadow RAM * * Read the given Shadow RAM word, and convert it to an area size value * specified in bytes. This function assumes the specified offset is a valid * area size word. * * Each area size word is specified in 4KB sector units. This function reports * the size in bytes, intended for flat NVM reads. */ static int ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size) { int status; u16 value; status = ice_read_sr_word(hw, offset, &value); if (status) return status; /* Area sizes are always specified in 4KB units */ *size = value * 4 * 1024; return 0; } /** * ice_determine_active_flash_banks - Discover active bank for each module * @hw: pointer to the HW struct * * Read the Shadow RAM control word and determine which banks are active for * the NVM, OROM, and Netlist modules. Also read and calculate the associated * pointer and size. These values are then cached into the ice_flash_info * structure for later use in order to calculate the correct offset to read * from the active module. 
*/ static int ice_determine_active_flash_banks(struct ice_hw *hw) { struct ice_bank_info *banks = &hw->flash.banks; u16 ctrl_word; int status; status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read the Shadow RAM control word\n"); return status; } /* Check that the control word indicates validity */ if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) { ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n"); return -EIO; } if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK)) banks->nvm_bank = ICE_1ST_FLASH_BANK; else banks->nvm_bank = ICE_2ND_FLASH_BANK; if (!(ctrl_word & ICE_SR_CTRL_WORD_OROM_BANK)) banks->orom_bank = ICE_1ST_FLASH_BANK; else banks->orom_bank = ICE_2ND_FLASH_BANK; if (!(ctrl_word & ICE_SR_CTRL_WORD_NETLIST_BANK)) banks->netlist_bank = ICE_1ST_FLASH_BANK; else banks->netlist_bank = ICE_2ND_FLASH_BANK; status = ice_read_sr_pointer(hw, ICE_SR_1ST_NVM_BANK_PTR, &banks->nvm_ptr); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM bank pointer\n"); return status; } status = ice_read_sr_area_size(hw, ICE_SR_NVM_BANK_SIZE, &banks->nvm_size); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM bank area size\n"); return status; } status = ice_read_sr_pointer(hw, ICE_SR_1ST_OROM_BANK_PTR, &banks->orom_ptr); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read OROM bank pointer\n"); return status; } status = ice_read_sr_area_size(hw, ICE_SR_OROM_BANK_SIZE, &banks->orom_size); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read OROM bank area size\n"); return status; } status = ice_read_sr_pointer(hw, ICE_SR_NETLIST_BANK_PTR, &banks->netlist_ptr); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read Netlist bank pointer\n"); return status; } status = ice_read_sr_area_size(hw, ICE_SR_NETLIST_BANK_SIZE, &banks->netlist_size); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read Netlist bank area size\n"); return status; } return 0; } /** * ice_init_nvm - initializes NVM setting * @hw: pointer to the HW struct * * This function reads and populates NVM settings such as Shadow RAM size, * max_timeout, and blank_nvm_mode */ int ice_init_nvm(struct ice_hw *hw) { struct ice_flash_info *flash = &hw->flash; u32 fla, gens_stat; u8 sr_size; int status; /* The SR size is stored regardless of the NVM programming mode * as the blank mode may be used in the factory line. 
*/ gens_stat = rd32(hw, GLNVM_GENS); sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S; /* Switching to words (sr_size contains power of 2) */ flash->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB; /* Check if we are in the normal or blank NVM programming mode */ fla = rd32(hw, GLNVM_FLA); if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */ flash->blank_nvm_mode = false; } else { /* Blank programming mode */ flash->blank_nvm_mode = true; ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n"); return -EIO; } status = ice_discover_flash_size(hw); if (status) { ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n"); return status; } status = ice_determine_active_flash_banks(hw); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to determine active flash banks.\n"); return status; } status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n"); return status; } status = ice_get_orom_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->orom); if (status) ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n"); /* read the netlist version information */ status = ice_get_netlist_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->netlist); if (status) ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n"); return 0; } /** * ice_nvm_validate_checksum * @hw: pointer to the HW struct * * Verify NVM PFA checksum validity (0x0706) */ int ice_nvm_validate_checksum(struct ice_hw *hw) { struct ice_aqc_nvm_checksum *cmd; struct ice_aq_desc desc; int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) return status; cmd = &desc.params.nvm_checksum; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum); cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY; status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); ice_release_nvm(hw); if (!status) if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT) status = -EIO; return status; } /** * ice_nvm_write_activate * @hw: pointer to the HW struct * @cmd_flags: flags for write activate command * @response_flags: response indicators from firmware * * Update the control word with the required banks' validity bits * and dumps the Shadow RAM to flash (0x0707) * * cmd_flags controls which banks to activate, the preservation level to use * when activating the NVM bank, and whether an EMP reset is required for * activation. * * Note that the 16bit cmd_flags value is split between two separate 1 byte * flag values in the descriptor. * * On successful return of the firmware command, the response_flags variable * is updated with the flags reported by firmware indicating certain status, * such as whether EMP reset is enabled. */ int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags) { struct ice_aqc_nvm *cmd; struct ice_aq_desc desc; int err; cmd = &desc.params.nvm; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate); cmd->cmd_flags = (u8)(cmd_flags & 0xFF); cmd->offset_high = (u8)((cmd_flags >> 8) & 0xFF); err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); if (!err && response_flags) *response_flags = cmd->cmd_flags; return err; } /** * ice_aq_nvm_update_empr * @hw: pointer to the HW struct * * Update empr (0x0709). This command allows SW to * request an EMPR to activate new FW. 
*/ int ice_aq_nvm_update_empr(struct ice_hw *hw) { struct ice_aq_desc desc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_update_empr); return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } /* ice_nvm_set_pkg_data * @hw: pointer to the HW struct * @del_pkg_data_flag: If is set then the current pkg_data store by FW * is deleted. * If bit is set to 1, then buffer should be size 0. * @data: pointer to buffer * @length: length of the buffer * @cd: pointer to command details structure or NULL * * Set package data (0x070A). This command is equivalent to the reception * of a PLDM FW Update GetPackageData cmd. This command should be sent * as part of the NVM update as the first cmd in the flow. */ int ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, u16 length, struct ice_sq_cd *cd) { struct ice_aqc_nvm_pkg_data *cmd; struct ice_aq_desc desc; if (length != 0 && !data) return -EINVAL; cmd = &desc.params.pkg_data; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_pkg_data); desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); if (del_pkg_data_flag) cmd->cmd_flags |= ICE_AQC_NVM_PKG_DELETE; return ice_aq_send_cmd(hw, &desc, data, length, cd); } /* ice_nvm_pass_component_tbl * @hw: pointer to the HW struct * @data: pointer to buffer * @length: length of the buffer * @transfer_flag: parameter for determining stage of the update * @comp_response: a pointer to the response from the 0x070B AQC. * @comp_response_code: a pointer to the response code from the 0x070B AQC. * @cd: pointer to command details structure or NULL * * Pass component table (0x070B). This command is equivalent to the reception * of a PLDM FW Update PassComponentTable cmd. This command should be sent once * per component. It can be only sent after Set Package Data cmd and before * actual update. FW will assume these commands are going to be sent until * the TransferFlag is set to End or StartAndEnd. */ int ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length, u8 transfer_flag, u8 *comp_response, u8 *comp_response_code, struct ice_sq_cd *cd) { struct ice_aqc_nvm_pass_comp_tbl *cmd; struct ice_aq_desc desc; int status; if (!data || !comp_response || !comp_response_code) return -EINVAL; cmd = &desc.params.pass_comp_tbl; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_pass_component_tbl); desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); cmd->transfer_flag = transfer_flag; status = ice_aq_send_cmd(hw, &desc, data, length, cd); if (!status) { *comp_response = cmd->component_response; *comp_response_code = cmd->component_response_code; } return status; }
linux-master
drivers/net/ethernet/intel/ice/ice_nvm.c
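The ice_read_flat_nvm() helper in the record above documents how a flat NVM read is split so that no single request crosses a 4KB Shadow RAM sector boundary or exceeds the 4KB AdminQ buffer limit. The following standalone sketch reproduces only that chunking loop in user-space C; read_chunk(), SECTOR_LEN, and the fill pattern are illustrative stand-ins, not driver APIs.

/*
 * Sketch of the chunking loop used by ice_read_flat_nvm(): split a flat
 * read into pieces that never cross a 4KB sector boundary and never
 * exceed 4KB per request. read_chunk() is a hypothetical stand-in for
 * the single-command firmware read.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SECTOR_LEN 4096u	/* mirrors ICE_AQ_MAX_BUF_LEN in the driver */

static int read_chunk(uint32_t offset, uint32_t len, uint8_t *dst, int last)
{
	/* Placeholder: a real implementation would issue one firmware read. */
	memset(dst, 0xA5, len);
	printf("read %u bytes at offset %u%s\n", len, offset,
	       last ? " (last)" : "");
	return 0;
}

static int read_flat(uint32_t offset, uint32_t length, uint8_t *data)
{
	uint32_t bytes_read = 0;
	int last = 0;

	do {
		uint32_t sector_offset = offset % SECTOR_LEN;
		uint32_t read_size = SECTOR_LEN - sector_offset;

		if (read_size > length - bytes_read)
			read_size = length - bytes_read;
		last = (bytes_read + read_size >= length);

		if (read_chunk(offset, read_size, data + bytes_read, last))
			return -1;

		bytes_read += read_size;
		offset += read_size;
	} while (!last);

	return 0;
}

int main(void)
{
	uint8_t buf[10000];

	/* A read starting mid-sector is split at each 4KB boundary. */
	return read_flat(100, sizeof(buf), buf);
}

With a real firmware read in place of read_chunk(), the same loop satisfies both the per-command size limit and the sector-boundary restriction described in the driver comment.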
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e_diag.h" #include "i40e_prototype.h" /** * i40e_diag_reg_pattern_test * @hw: pointer to the hw struct * @reg: reg to be tested * @mask: bits to be touched **/ static int i40e_diag_reg_pattern_test(struct i40e_hw *hw, u32 reg, u32 mask) { static const u32 patterns[] = { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF }; u32 pat, val, orig_val; int i; orig_val = rd32(hw, reg); for (i = 0; i < ARRAY_SIZE(patterns); i++) { pat = patterns[i]; wr32(hw, reg, (pat & mask)); val = rd32(hw, reg); if ((val & mask) != (pat & mask)) { i40e_debug(hw, I40E_DEBUG_DIAG, "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n", __func__, reg, pat, val); return -EIO; } } wr32(hw, reg, orig_val); val = rd32(hw, reg); if (val != orig_val) { i40e_debug(hw, I40E_DEBUG_DIAG, "%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n", __func__, reg, orig_val, val); return -EIO; } return 0; } const struct i40e_diag_reg_test_info i40e_reg_list[] = { /* offset mask elements stride */ {I40E_QTX_CTL(0), 0x0000FFBF, 1, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)}, {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)}, {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)}, {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)}, {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)}, {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0}, {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0}, {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)}, {I40E_QINT_TQCTL(0), 0x000000FF, 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)}, {I40E_QINT_RQCTL(0), 0x000000FF, 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)}, {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0}, { 0 } }; /** * i40e_diag_reg_test * @hw: pointer to the hw struct * * Perform registers diagnostic test **/ int i40e_diag_reg_test(struct i40e_hw *hw) { int ret_code = 0; u32 reg, mask; u32 elements; u32 i, j; for (i = 0; i40e_reg_list[i].offset != 0 && !ret_code; i++) { elements = i40e_reg_list[i].elements; /* set actual reg range for dynamically allocated resources */ if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) && hw->func_caps.num_tx_qp != 0) elements = hw->func_caps.num_tx_qp; if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) || i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) || i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) || i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) || i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) && hw->func_caps.num_msix_vectors != 0) elements = hw->func_caps.num_msix_vectors - 1; /* test register access */ mask = i40e_reg_list[i].mask; for (j = 0; j < elements && !ret_code; j++) { reg = i40e_reg_list[i].offset + (j * i40e_reg_list[i].stride); ret_code = i40e_diag_reg_pattern_test(hw, reg, mask); } } return ret_code; } /** * i40e_diag_eeprom_test * @hw: pointer to the hw struct * * Perform EEPROM diagnostic test **/ int i40e_diag_eeprom_test(struct i40e_hw *hw) { int ret_code; u16 reg_val; /* read NVM control word and if NVM valid, validate EEPROM checksum*/ ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val); if (!ret_code && ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) == BIT(I40E_SR_CONTROL_WORD_1_SHIFT))) return i40e_validate_nvm_checksum(hw, NULL); else return -EIO; }
linux-master
drivers/net/ethernet/intel/i40e/i40e_diag.c
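i40e_diag_reg_pattern_test() in the record above writes a fixed set of patterns through a mask, reads each one back, and finally restores and re-verifies the original register value. A minimal user-space sketch of the same idea, using a plain variable and trivial rd32()/wr32() stand-ins rather than the driver's MMIO accessors, might look like this.

/*
 * Sketch of the register pattern test from i40e_diag_reg_pattern_test(),
 * run against a plain memory word instead of a device register.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;

static uint32_t rd32(void) { return fake_reg; }
static void wr32(uint32_t v) { fake_reg = v; }

static int pattern_test(uint32_t mask)
{
	static const uint32_t patterns[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
	};
	uint32_t orig = rd32();

	for (unsigned int i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
		wr32(patterns[i] & mask);
		if ((rd32() & mask) != (patterns[i] & mask))
			return -1;	/* writable bits did not stick */
	}

	wr32(orig);	/* restore the original value */
	return (rd32() == orig) ? 0 : -1;
}

int main(void)
{
	fake_reg = 0x12345678;
	printf("pattern test %s\n", pattern_test(0x0000FFFF) ? "failed" : "passed");
	return 0;
}

As in the driver, only the masked bits are compared while each pattern is applied, and the final read-back confirms the original value survives being restored.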
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include <linux/list.h> #include <linux/errno.h> #include <linux/net/intel/i40e_client.h> #include "i40e.h" #include "i40e_prototype.h" static LIST_HEAD(i40e_devices); static DEFINE_MUTEX(i40e_device_mutex); DEFINE_IDA(i40e_client_ida); static int i40e_client_virtchnl_send(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id, u8 *msg, u16 len); static int i40e_client_setup_qvlist(struct i40e_info *ldev, struct i40e_client *client, struct i40e_qvlist_info *qvlist_info); static void i40e_client_request_reset(struct i40e_info *ldev, struct i40e_client *client, u32 reset_level); static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev, struct i40e_client *client, bool is_vf, u32 vf_id, u32 flag, u32 valid_flag); static struct i40e_ops i40e_lan_ops = { .virtchnl_send = i40e_client_virtchnl_send, .setup_qvlist = i40e_client_setup_qvlist, .request_reset = i40e_client_request_reset, .update_vsi_ctxt = i40e_client_update_vsi_ctxt, }; /** * i40e_client_get_params - Get the params that can change at runtime * @vsi: the VSI with the message * @params: client param struct * **/ static int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params) { struct i40e_dcbx_config *dcb_cfg = &vsi->back->hw.local_dcbx_config; int i = 0; for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { u8 tc = dcb_cfg->etscfg.prioritytable[i]; u16 qs_handle; /* If TC is not enabled for VSI use TC0 for UP */ if (!(vsi->tc_config.enabled_tc & BIT(tc))) tc = 0; qs_handle = le16_to_cpu(vsi->info.qs_handle[tc]); params->qos.prio_qos[i].tc = tc; params->qos.prio_qos[i].qs_handle = qs_handle; if (qs_handle == I40E_AQ_VSI_QS_HANDLE_INVALID) { dev_err(&vsi->back->pdev->dev, "Invalid queue set handle for TC = %d, vsi id = %d\n", tc, vsi->id); return -EINVAL; } } params->mtu = vsi->netdev->mtu; return 0; } /** * i40e_notify_client_of_vf_msg - call the client vf message callback * @vsi: the VSI with the message * @vf_id: the absolute VF id that sent the message * @msg: message buffer * @len: length of the message * * If there is a client to this VSI, call the client **/ void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len) { struct i40e_pf *pf = vsi->back; struct i40e_client_instance *cdev = pf->cinst; if (!cdev || !cdev->client) return; if (!cdev->client->ops || !cdev->client->ops->virtchnl_receive) { dev_dbg(&pf->pdev->dev, "Cannot locate client instance virtual channel receive routine\n"); return; } if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { dev_dbg(&pf->pdev->dev, "Client is not open, abort virtchnl_receive\n"); return; } cdev->client->ops->virtchnl_receive(&cdev->lan_info, cdev->client, vf_id, msg, len); } /** * i40e_notify_client_of_l2_param_changes - call the client notify callback * @vsi: the VSI with l2 param changes * * If there is a client to this VSI, call the client **/ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_client_instance *cdev = pf->cinst; struct i40e_params params; if (!cdev || !cdev->client) return; if (!cdev->client->ops || !cdev->client->ops->l2_param_change) { dev_dbg(&vsi->back->pdev->dev, "Cannot locate client instance l2_param_change routine\n"); return; } if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n"); return; } memset(&params, 0, sizeof(params)); i40e_client_get_params(vsi, &params); 
memcpy(&cdev->lan_info.params, &params, sizeof(struct i40e_params)); cdev->client->ops->l2_param_change(&cdev->lan_info, cdev->client, &params); } /** * i40e_client_release_qvlist - release MSI-X vector mapping for client * @ldev: pointer to L2 context. * **/ static void i40e_client_release_qvlist(struct i40e_info *ldev) { struct i40e_qvlist_info *qvlist_info = ldev->qvlist_info; u32 i; if (!ldev->qvlist_info) return; for (i = 0; i < qvlist_info->num_vectors; i++) { struct i40e_pf *pf = ldev->pf; struct i40e_qv_info *qv_info; u32 reg_idx; qv_info = &qvlist_info->qv_info[i]; if (!qv_info) continue; reg_idx = I40E_PFINT_LNKLSTN(qv_info->v_idx - 1); wr32(&pf->hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK); } kfree(ldev->qvlist_info); ldev->qvlist_info = NULL; } /** * i40e_notify_client_of_netdev_close - call the client close callback * @vsi: the VSI with netdev closed * @reset: true when close called due to a reset pending * * If there is a client to this netdev, call the client with close **/ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset) { struct i40e_pf *pf = vsi->back; struct i40e_client_instance *cdev = pf->cinst; if (!cdev || !cdev->client) return; if (!cdev->client->ops || !cdev->client->ops->close) { dev_dbg(&vsi->back->pdev->dev, "Cannot locate client instance close routine\n"); return; } if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n"); return; } cdev->client->ops->close(&cdev->lan_info, cdev->client, reset); clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); i40e_client_release_qvlist(&cdev->lan_info); } /** * i40e_notify_client_of_vf_reset - call the client vf reset callback * @pf: PF device pointer * @vf_id: asolute id of VF being reset * * If there is a client attached to this PF, notify when a VF is reset **/ void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id) { struct i40e_client_instance *cdev = pf->cinst; if (!cdev || !cdev->client) return; if (!cdev->client->ops || !cdev->client->ops->vf_reset) { dev_dbg(&pf->pdev->dev, "Cannot locate client instance VF reset routine\n"); return; } if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n"); return; } cdev->client->ops->vf_reset(&cdev->lan_info, cdev->client, vf_id); } /** * i40e_notify_client_of_vf_enable - call the client vf notification callback * @pf: PF device pointer * @num_vfs: the number of VFs currently enabled, 0 for disable * * If there is a client attached to this PF, call its VF notification routine **/ void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs) { struct i40e_client_instance *cdev = pf->cinst; if (!cdev || !cdev->client) return; if (!cdev->client->ops || !cdev->client->ops->vf_enable) { dev_dbg(&pf->pdev->dev, "Cannot locate client instance VF enable routine\n"); return; } if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n"); return; } cdev->client->ops->vf_enable(&cdev->lan_info, cdev->client, num_vfs); } /** * i40e_vf_client_capable - ask the client if it likes the specified VF * @pf: PF device pointer * @vf_id: the VF in question * * If there is a client of the specified type attached to this PF, call * its vf_capable routine **/ int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id) { struct i40e_client_instance *cdev = pf->cinst; int capable = false; if (!cdev || !cdev->client) goto out; if (!cdev->client->ops || 
!cdev->client->ops->vf_capable) { dev_dbg(&pf->pdev->dev, "Cannot locate client instance VF capability routine\n"); goto out; } if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) goto out; capable = cdev->client->ops->vf_capable(&cdev->lan_info, cdev->client, vf_id); out: return capable; } void i40e_client_update_msix_info(struct i40e_pf *pf) { struct i40e_client_instance *cdev = pf->cinst; if (!cdev || !cdev->client) return; cdev->lan_info.msix_count = pf->num_iwarp_msix; cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector]; } static void i40e_auxiliary_dev_release(struct device *dev) { struct i40e_auxiliary_device *i40e_aux_dev = container_of(dev, struct i40e_auxiliary_device, aux_dev.dev); ida_free(&i40e_client_ida, i40e_aux_dev->aux_dev.id); kfree(i40e_aux_dev); } static int i40e_register_auxiliary_dev(struct i40e_info *ldev, const char *name) { struct i40e_auxiliary_device *i40e_aux_dev; struct pci_dev *pdev = ldev->pcidev; struct auxiliary_device *aux_dev; int ret; i40e_aux_dev = kzalloc(sizeof(*i40e_aux_dev), GFP_KERNEL); if (!i40e_aux_dev) return -ENOMEM; i40e_aux_dev->ldev = ldev; aux_dev = &i40e_aux_dev->aux_dev; aux_dev->name = name; aux_dev->dev.parent = &pdev->dev; aux_dev->dev.release = i40e_auxiliary_dev_release; ldev->aux_dev = aux_dev; ret = ida_alloc(&i40e_client_ida, GFP_KERNEL); if (ret < 0) { kfree(i40e_aux_dev); return ret; } aux_dev->id = ret; ret = auxiliary_device_init(aux_dev); if (ret < 0) { ida_free(&i40e_client_ida, aux_dev->id); kfree(i40e_aux_dev); return ret; } ret = auxiliary_device_add(aux_dev); if (ret) { auxiliary_device_uninit(aux_dev); return ret; } return ret; } /** * i40e_client_add_instance - add a client instance struct to the instance list * @pf: pointer to the board struct * **/ static void i40e_client_add_instance(struct i40e_pf *pf) { struct i40e_client_instance *cdev = NULL; struct netdev_hw_addr *mac = NULL; struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); if (!cdev) return; cdev->lan_info.pf = (void *)pf; cdev->lan_info.netdev = vsi->netdev; cdev->lan_info.pcidev = pf->pdev; cdev->lan_info.fid = pf->hw.pf_id; cdev->lan_info.ftype = I40E_CLIENT_FTYPE_PF; cdev->lan_info.hw_addr = pf->hw.hw_addr; cdev->lan_info.ops = &i40e_lan_ops; cdev->lan_info.version.major = I40E_CLIENT_VERSION_MAJOR; cdev->lan_info.version.minor = I40E_CLIENT_VERSION_MINOR; cdev->lan_info.version.build = I40E_CLIENT_VERSION_BUILD; cdev->lan_info.fw_maj_ver = pf->hw.aq.fw_maj_ver; cdev->lan_info.fw_min_ver = pf->hw.aq.fw_min_ver; cdev->lan_info.fw_build = pf->hw.aq.fw_build; set_bit(__I40E_CLIENT_INSTANCE_NONE, &cdev->state); if (i40e_client_get_params(vsi, &cdev->lan_info.params)) goto free_cdev; mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list, struct netdev_hw_addr, list); if (mac) ether_addr_copy(cdev->lan_info.lanmac, mac->addr); else dev_err(&pf->pdev->dev, "MAC address list is empty!\n"); pf->cinst = cdev; cdev->lan_info.msix_count = pf->num_iwarp_msix; cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector]; if (i40e_register_auxiliary_dev(&cdev->lan_info, "iwarp")) goto free_cdev; return; free_cdev: kfree(cdev); pf->cinst = NULL; } /** * i40e_client_del_instance - removes a client instance from the list * @pf: pointer to the board struct * **/ static void i40e_client_del_instance(struct i40e_pf *pf) { kfree(pf->cinst); pf->cinst = NULL; } /** * i40e_client_subtask - client maintenance work * @pf: board private structure **/ void i40e_client_subtask(struct i40e_pf *pf) { 
struct i40e_client *client; struct i40e_client_instance *cdev; struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; int ret = 0; if (!test_and_clear_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state)) return; cdev = pf->cinst; /* If we're down or resetting, just bail */ if (test_bit(__I40E_DOWN, pf->state) || test_bit(__I40E_CONFIG_BUSY, pf->state)) return; if (!cdev || !cdev->client) return; client = cdev->client; /* Here we handle client opens. If the client is down, and * the netdev is registered, then open the client. */ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { if (vsi->netdev_registered && client->ops && client->ops->open) { set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); ret = client->ops->open(&cdev->lan_info, client); if (ret) { /* Remove failed client instance */ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); return; } } } /* enable/disable PE TCP_ENA flag based on netdev down/up */ if (test_bit(__I40E_VSI_DOWN, vsi->state)) i40e_client_update_vsi_ctxt(&cdev->lan_info, client, 0, 0, 0, I40E_CLIENT_VSI_FLAG_TCP_ENABLE); else i40e_client_update_vsi_ctxt(&cdev->lan_info, client, 0, 0, I40E_CLIENT_VSI_FLAG_TCP_ENABLE, I40E_CLIENT_VSI_FLAG_TCP_ENABLE); } /** * i40e_lan_add_device - add a lan device struct to the list of lan devices * @pf: pointer to the board struct * * Returns 0 on success or none 0 on error **/ int i40e_lan_add_device(struct i40e_pf *pf) { struct i40e_device *ldev; int ret = 0; mutex_lock(&i40e_device_mutex); list_for_each_entry(ldev, &i40e_devices, list) { if (ldev->pf == pf) { ret = -EEXIST; goto out; } } ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); if (!ldev) { ret = -ENOMEM; goto out; } ldev->pf = pf; INIT_LIST_HEAD(&ldev->list); list_add(&ldev->list, &i40e_devices); dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x dev=0x%02x func=0x%02x\n", pf->hw.pf_id, pf->hw.bus.bus_id, pf->hw.bus.device, pf->hw.bus.func); i40e_client_add_instance(pf); set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); i40e_service_event_schedule(pf); out: mutex_unlock(&i40e_device_mutex); return ret; } /** * i40e_lan_del_device - removes a lan device from the device list * @pf: pointer to the board struct * * Returns 0 on success or non-0 on error **/ int i40e_lan_del_device(struct i40e_pf *pf) { struct auxiliary_device *aux_dev = pf->cinst->lan_info.aux_dev; struct i40e_device *ldev, *tmp; int ret = -ENODEV; auxiliary_device_delete(aux_dev); auxiliary_device_uninit(aux_dev); /* First, remove any client instance. 
*/ i40e_client_del_instance(pf); mutex_lock(&i40e_device_mutex); list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) { if (ldev->pf == pf) { dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x dev=0x%02x func=0x%02x\n", pf->hw.pf_id, pf->hw.bus.bus_id, pf->hw.bus.device, pf->hw.bus.func); list_del(&ldev->list); kfree(ldev); ret = 0; break; } } mutex_unlock(&i40e_device_mutex); return ret; } /** * i40e_client_virtchnl_send - TBD * @ldev: pointer to L2 context * @client: Client pointer * @vf_id: absolute VF identifier * @msg: message buffer * @len: length of message buffer * * Return 0 on success or < 0 on error **/ static int i40e_client_virtchnl_send(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id, u8 *msg, u16 len) { struct i40e_pf *pf = ldev->pf; struct i40e_hw *hw = &pf->hw; int err; err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_RDMA, 0, msg, len, NULL); if (err) dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n", err, hw->aq.asq_last_status); return err; } /** * i40e_client_setup_qvlist * @ldev: pointer to L2 context. * @client: Client pointer. * @qvlist_info: queue and vector list * * Return 0 on success or < 0 on error **/ static int i40e_client_setup_qvlist(struct i40e_info *ldev, struct i40e_client *client, struct i40e_qvlist_info *qvlist_info) { struct i40e_pf *pf = ldev->pf; struct i40e_hw *hw = &pf->hw; struct i40e_qv_info *qv_info; u32 v_idx, i, reg_idx, reg; ldev->qvlist_info = kzalloc(struct_size(ldev->qvlist_info, qv_info, qvlist_info->num_vectors), GFP_KERNEL); if (!ldev->qvlist_info) return -ENOMEM; ldev->qvlist_info->num_vectors = qvlist_info->num_vectors; for (i = 0; i < qvlist_info->num_vectors; i++) { qv_info = &qvlist_info->qv_info[i]; if (!qv_info) continue; v_idx = qv_info->v_idx; /* Validate vector id belongs to this client */ if ((v_idx >= (pf->iwarp_base_vector + pf->num_iwarp_msix)) || (v_idx < pf->iwarp_base_vector)) goto err; ldev->qvlist_info->qv_info[i] = *qv_info; reg_idx = I40E_PFINT_LNKLSTN(v_idx - 1); if (qv_info->ceq_idx == I40E_QUEUE_INVALID_IDX) { /* Special case - No CEQ mapped on this vector */ wr32(hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK); } else { reg = (qv_info->ceq_idx & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) | (I40E_QUEUE_TYPE_PE_CEQ << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); wr32(hw, reg_idx, reg); reg = (I40E_PFINT_CEQCTL_CAUSE_ENA_MASK | (v_idx << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) | (qv_info->itr_idx << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) | (I40E_QUEUE_END_OF_LIST << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)); wr32(hw, I40E_PFINT_CEQCTL(qv_info->ceq_idx), reg); } if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) { reg = (I40E_PFINT_AEQCTL_CAUSE_ENA_MASK | (v_idx << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) | (qv_info->itr_idx << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)); wr32(hw, I40E_PFINT_AEQCTL, reg); } } /* Mitigate sync problems with iwarp VF driver */ i40e_flush(hw); return 0; err: kfree(ldev->qvlist_info); ldev->qvlist_info = NULL; return -EINVAL; } /** * i40e_client_request_reset * @ldev: pointer to L2 context. * @client: Client pointer. 
* @reset_level: reset level **/ static void i40e_client_request_reset(struct i40e_info *ldev, struct i40e_client *client, u32 reset_level) { struct i40e_pf *pf = ldev->pf; switch (reset_level) { case I40E_CLIENT_RESET_LEVEL_PF: set_bit(__I40E_PF_RESET_REQUESTED, pf->state); break; case I40E_CLIENT_RESET_LEVEL_CORE: set_bit(__I40E_PF_RESET_REQUESTED, pf->state); break; default: dev_warn(&pf->pdev->dev, "Client for PF id %d requested an unsupported reset: %d.\n", pf->hw.pf_id, reset_level); break; } i40e_service_event_schedule(pf); } /** * i40e_client_update_vsi_ctxt * @ldev: pointer to L2 context. * @client: Client pointer. * @is_vf: if this for the VF * @vf_id: if is_vf true this carries the vf_id * @flag: Any device level setting that needs to be done for PE * @valid_flag: Bits in this match up and enable changing of flag bits * * Return 0 on success or < 0 on error **/ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev, struct i40e_client *client, bool is_vf, u32 vf_id, u32 flag, u32 valid_flag) { struct i40e_pf *pf = ldev->pf; struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; bool update = true; int err; /* TODO: for now do not allow setting VF's VSI setting */ if (is_vf) return -EINVAL; ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; err = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); ctxt.flags = I40E_AQ_VSI_TYPE_PF; if (err) { dev_info(&pf->pdev->dev, "couldn't get PF vsi config, err %pe aq_err %s\n", ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -ENOENT; } if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) && (flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) { ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) && !(flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) { ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA; } else { update = false; dev_warn(&pf->pdev->dev, "Client for PF id %d request an unsupported Config: %x.\n", pf->hw.pf_id, flag); } if (update) { err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (err) { dev_info(&pf->pdev->dev, "update VSI ctxt for PE failed, err %pe aq_err %s\n", ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } return err; } void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client) { struct i40e_pf *pf = ldev->pf; pf->cinst->client = client; set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); i40e_service_event_schedule(pf); } EXPORT_SYMBOL_GPL(i40e_client_device_register); void i40e_client_device_unregister(struct i40e_info *ldev) { struct i40e_pf *pf = ldev->pf; struct i40e_client_instance *cdev = pf->cinst; if (!cdev) return; while (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) usleep_range(500, 1000); if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { cdev->client->ops->close(&cdev->lan_info, cdev->client, false); clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); i40e_client_release_qvlist(&cdev->lan_info); } pf->cinst->client = NULL; clear_bit(__I40E_SERVICE_SCHED, pf->state); } EXPORT_SYMBOL_GPL(i40e_client_device_unregister);
linux-master
drivers/net/ethernet/intel/i40e/i40e_client.c
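The client interface implemented in i40e_client.c above is consumed by an RDMA driver that binds to the LAN PF and is handed an i40e_info handle. The sketch below shows, under stated assumptions, how such a client might hook into i40e_client_device_register()/i40e_client_device_unregister(). Only those two exports and the open/close callback shapes are taken from the code above; the my_rdma_* names, the probe/remove entry points, and the header path are illustrative stand-ins for whatever bus glue (the auxiliary bus, for the in-tree irdma driver) actually delivers the i40e_info.

/* Hedged sketch of an i40e client (e.g. an RDMA driver) binding to the LAN
 * PF. Only i40e_client_device_register()/_unregister() and the open/close
 * callback shapes come from the driver code above; everything prefixed
 * my_rdma_ is hypothetical, and the header path is assumed.
 */
#include <linux/module.h>
#include <linux/net/intel/i40e_client.h>

static int my_rdma_open(struct i40e_info *ldev, struct i40e_client *client)
{
	/* Set up client resources backed by this LAN PF; returning 0 lets
	 * i40e mark the client instance as opened.
	 */
	return 0;
}

static void my_rdma_close(struct i40e_info *ldev, struct i40e_client *client,
			  bool reset)
{
	/* Invoked by i40e (see i40e_client_device_unregister() above)
	 * before the client instance is torn down.
	 */
}

static struct i40e_client_ops my_rdma_ops = {
	.open	= my_rdma_open,
	.close	= my_rdma_close,
};

static struct i40e_client my_rdma_client = {
	.name	= "my_rdma",
	.ops	= &my_rdma_ops,
};

/* Entry points called by whatever bus glue hands over the i40e_info. */
static int my_rdma_probe(struct i40e_info *ldev)
{
	i40e_client_device_register(ldev, &my_rdma_client);
	return 0;
}

static void my_rdma_remove(struct i40e_info *ldev)
{
	i40e_client_device_unregister(ldev);
}

Note that, per the code above, registering only stashes the client pointer and schedules the service task; open() then runs from i40e's service path, which is why i40e_client_device_unregister() waits on __I40E_SERVICE_SCHED before calling close().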
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2021 Intel Corporation. */ #include "i40e_adminq.h" #include "i40e_prototype.h" #include "i40e_dcb.h" /** * i40e_get_dcbx_status * @hw: pointer to the hw struct * @status: Embedded DCBX Engine Status * * Get the DCBX status from the Firmware **/ int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) { u32 reg; if (!status) return -EINVAL; reg = rd32(hw, I40E_PRTDCB_GENS); *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >> I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT); return 0; } /** * i40e_parse_ieee_etscfg_tlv * @tlv: IEEE 802.1Qaz ETS CFG TLV * @dcbcfg: Local store to update ETS CFG data * * Parses IEEE 802.1Qaz ETS CFG TLV **/ static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { struct i40e_dcb_ets_config *etscfg; u8 *buf = tlv->tlvinfo; u16 offset = 0; u8 priority; int i; /* First Octet post subtype * -------------------------- * |will-|CBS | Re- | Max | * |ing | |served| TCs | * -------------------------- * |1bit | 1bit|3 bits|3bits| */ etscfg = &dcbcfg->etscfg; etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >> I40E_IEEE_ETS_WILLING_SHIFT); etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >> I40E_IEEE_ETS_CBS_SHIFT); etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >> I40E_IEEE_ETS_MAXTC_SHIFT); /* Move offset to Priority Assignment Table */ offset++; /* Priority Assignment Table (4 octets) * Octets:| 1 | 2 | 3 | 4 | * ----------------------------------------- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| * ----------------------------------------- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| * ----------------------------------------- */ for (i = 0; i < 4; i++) { priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> I40E_IEEE_ETS_PRIO_1_SHIFT); etscfg->prioritytable[i * 2] = priority; priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> I40E_IEEE_ETS_PRIO_0_SHIFT); etscfg->prioritytable[i * 2 + 1] = priority; offset++; } /* TC Bandwidth Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) etscfg->tcbwtable[i] = buf[offset++]; /* TSA Assignment Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) etscfg->tsatable[i] = buf[offset++]; } /** * i40e_parse_ieee_etsrec_tlv * @tlv: IEEE 802.1Qaz ETS REC TLV * @dcbcfg: Local store to update ETS REC data * * Parses IEEE 802.1Qaz ETS REC TLV **/ static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u8 *buf = tlv->tlvinfo; u16 offset = 0; u8 priority; int i; /* Move offset to priority table */ offset++; /* Priority Assignment Table (4 octets) * Octets:| 1 | 2 | 3 | 4 | * ----------------------------------------- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| * ----------------------------------------- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| * ----------------------------------------- */ for (i = 0; i < 4; i++) { priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> I40E_IEEE_ETS_PRIO_1_SHIFT); dcbcfg->etsrec.prioritytable[i*2] = priority; priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> I40E_IEEE_ETS_PRIO_0_SHIFT); dcbcfg->etsrec.prioritytable[i*2 + 1] = priority; offset++; } /* TC Bandwidth 
Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) dcbcfg->etsrec.tcbwtable[i] = buf[offset++]; /* TSA Assignment Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) dcbcfg->etsrec.tsatable[i] = buf[offset++]; } /** * i40e_parse_ieee_pfccfg_tlv * @tlv: IEEE 802.1Qaz PFC CFG TLV * @dcbcfg: Local store to update PFC CFG data * * Parses IEEE 802.1Qaz PFC CFG TLV **/ static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u8 *buf = tlv->tlvinfo; /* ---------------------------------------- * |will-|MBC | Re- | PFC | PFC Enable | * |ing | |served| cap | | * ----------------------------------------- * |1bit | 1bit|2 bits|4bits| 1 octet | */ dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >> I40E_IEEE_PFC_WILLING_SHIFT); dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >> I40E_IEEE_PFC_MBC_SHIFT); dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >> I40E_IEEE_PFC_CAP_SHIFT); dcbcfg->pfc.pfcenable = buf[1]; } /** * i40e_parse_ieee_app_tlv * @tlv: IEEE 802.1Qaz APP TLV * @dcbcfg: Local store to update APP PRIO data * * Parses IEEE 802.1Qaz APP PRIO TLV **/ static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u16 typelength; u16 offset = 0; u16 length; int i = 0; u8 *buf; typelength = ntohs(tlv->typelength); length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT); buf = tlv->tlvinfo; /* The App priority table starts 5 octets after TLV header */ length -= (sizeof(tlv->ouisubtype) + 1); /* Move offset to App Priority Table */ offset++; /* Application Priority Table (3 octets) * Octets:| 1 | 2 | 3 | * ----------------------------------------- * |Priority|Rsrvd| Sel | Protocol ID | * ----------------------------------------- * Bits:|23 21|20 19|18 16|15 0| * ----------------------------------------- */ while (offset < length) { dcbcfg->app[i].priority = (u8)((buf[offset] & I40E_IEEE_APP_PRIO_MASK) >> I40E_IEEE_APP_PRIO_SHIFT); dcbcfg->app[i].selector = (u8)((buf[offset] & I40E_IEEE_APP_SEL_MASK) >> I40E_IEEE_APP_SEL_SHIFT); dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) | buf[offset + 2]; /* Move to next app */ offset += 3; i++; if (i >= I40E_DCBX_MAX_APPS) break; } dcbcfg->numapps = i; } /** * i40e_parse_ieee_tlv * @tlv: IEEE 802.1Qaz TLV * @dcbcfg: Local store to update ETS REC data * * Get the TLV subtype and send it to parsing function * based on the subtype value **/ static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u32 ouisubtype; u8 subtype; ouisubtype = ntohl(tlv->ouisubtype); subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >> I40E_LLDP_TLV_SUBTYPE_SHIFT); switch (subtype) { case I40E_IEEE_SUBTYPE_ETS_CFG: i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg); break; case I40E_IEEE_SUBTYPE_ETS_REC: i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg); break; case I40E_IEEE_SUBTYPE_PFC_CFG: i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg); break; case I40E_IEEE_SUBTYPE_APP_PRI: i40e_parse_ieee_app_tlv(tlv, dcbcfg); break; default: break; } } /** * i40e_parse_cee_pgcfg_tlv * @tlv: CEE DCBX PG CFG TLV * @dcbcfg: Local store to update ETS CFG data * * Parses CEE DCBX PG CFG TLV **/ static void 
i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { struct i40e_dcb_ets_config *etscfg; u8 *buf = tlv->tlvinfo; u16 offset = 0; u8 priority; int i; etscfg = &dcbcfg->etscfg; if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK) etscfg->willing = 1; etscfg->cbs = 0; /* Priority Group Table (4 octets) * Octets:| 1 | 2 | 3 | 4 | * ----------------------------------------- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| * ----------------------------------------- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| * ----------------------------------------- */ for (i = 0; i < 4; i++) { priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >> I40E_CEE_PGID_PRIO_1_SHIFT); etscfg->prioritytable[i * 2] = priority; priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >> I40E_CEE_PGID_PRIO_0_SHIFT); etscfg->prioritytable[i * 2 + 1] = priority; offset++; } /* PG Percentage Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) etscfg->tcbwtable[i] = buf[offset++]; /* Number of TCs supported (1 octet) */ etscfg->maxtcs = buf[offset]; } /** * i40e_parse_cee_pfccfg_tlv * @tlv: CEE DCBX PFC CFG TLV * @dcbcfg: Local store to update PFC CFG data * * Parses CEE DCBX PFC CFG TLV **/ static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u8 *buf = tlv->tlvinfo; if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK) dcbcfg->pfc.willing = 1; /* ------------------------ * | PFC Enable | PFC TCs | * ------------------------ * | 1 octet | 1 octet | */ dcbcfg->pfc.pfcenable = buf[0]; dcbcfg->pfc.pfccap = buf[1]; } /** * i40e_parse_cee_app_tlv * @tlv: CEE DCBX APP TLV * @dcbcfg: Local store to update APP PRIO data * * Parses CEE DCBX APP PRIO TLV **/ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u16 length, typelength, offset = 0; struct i40e_cee_app_prio *app; u8 i; typelength = ntohs(tlv->hdr.typelen); length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT); dcbcfg->numapps = length / sizeof(*app); if (!dcbcfg->numapps) return; if (dcbcfg->numapps > I40E_DCBX_MAX_APPS) dcbcfg->numapps = I40E_DCBX_MAX_APPS; for (i = 0; i < dcbcfg->numapps; i++) { u8 up, selector; app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset); for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) { if (app->prio_map & BIT(up)) break; } dcbcfg->app[i].priority = up; /* Get Selector from lower 2 bits, and convert to IEEE */ selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK); switch (selector) { case I40E_CEE_APP_SEL_ETHTYPE: dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; break; case I40E_CEE_APP_SEL_TCPIP: dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; break; default: /* Keep selector as it is for unknown types */ dcbcfg->app[i].selector = selector; } dcbcfg->app[i].protocolid = ntohs(app->protocol); /* Move to next app */ offset += sizeof(*app); } } /** * i40e_parse_cee_tlv * @tlv: CEE DCBX TLV * @dcbcfg: Local store to update DCBX config data * * Get the TLV subtype and send it to parsing function * based on the subtype value **/ static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u16 len, tlvlen, sublen, typelength; struct i40e_cee_feat_tlv *sub_tlv; u8 subtype, feat_tlv_count = 0; u32 ouisubtype; ouisubtype = ntohl(tlv->ouisubtype); subtype = (u8)((ouisubtype & 
I40E_LLDP_TLV_SUBTYPE_MASK) >> I40E_LLDP_TLV_SUBTYPE_SHIFT); /* Return if not CEE DCBX */ if (subtype != I40E_CEE_DCBX_TYPE) return; typelength = ntohs(tlv->typelength); tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT); len = sizeof(tlv->typelength) + sizeof(ouisubtype) + sizeof(struct i40e_cee_ctrl_tlv); /* Return if no CEE DCBX Feature TLVs */ if (tlvlen <= len) return; sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len); while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) { typelength = ntohs(sub_tlv->hdr.typelen); sublen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT); subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> I40E_LLDP_TLV_TYPE_SHIFT); switch (subtype) { case I40E_CEE_SUBTYPE_PG_CFG: i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg); break; case I40E_CEE_SUBTYPE_PFC_CFG: i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg); break; case I40E_CEE_SUBTYPE_APP_PRI: i40e_parse_cee_app_tlv(sub_tlv, dcbcfg); break; default: return; /* Invalid Sub-type return */ } feat_tlv_count++; /* Move to next sub TLV */ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv + sizeof(sub_tlv->hdr.typelen) + sublen); } } /** * i40e_parse_org_tlv * @tlv: Organization specific TLV * @dcbcfg: Local store to update ETS REC data * * Currently only IEEE 802.1Qaz TLV is supported, all others * will be returned **/ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u32 ouisubtype; u32 oui; ouisubtype = ntohl(tlv->ouisubtype); oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >> I40E_LLDP_TLV_OUI_SHIFT); switch (oui) { case I40E_IEEE_8021QAZ_OUI: i40e_parse_ieee_tlv(tlv, dcbcfg); break; case I40E_CEE_DCBX_OUI: i40e_parse_cee_tlv(tlv, dcbcfg); break; default: break; } } /** * i40e_lldp_to_dcb_config * @lldpmib: LLDPDU to be parsed * @dcbcfg: store for LLDPDU data * * Parse DCB configuration from the LLDPDU **/ int i40e_lldp_to_dcb_config(u8 *lldpmib, struct i40e_dcbx_config *dcbcfg) { struct i40e_lldp_org_tlv *tlv; u16 typelength; u16 offset = 0; int ret = 0; u16 length; u16 type; if (!lldpmib || !dcbcfg) return -EINVAL; /* set to the start of LLDPDU */ lldpmib += ETH_HLEN; tlv = (struct i40e_lldp_org_tlv *)lldpmib; while (1) { typelength = ntohs(tlv->typelength); type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> I40E_LLDP_TLV_TYPE_SHIFT); length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT); offset += sizeof(typelength) + length; /* END TLV or beyond LLDPDU size */ if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE)) break; switch (type) { case I40E_TLV_TYPE_ORG: i40e_parse_org_tlv(tlv, dcbcfg); break; default: break; } /* Move to next TLV */ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv + sizeof(tlv->typelength) + length); } return ret; } /** * i40e_aq_get_dcb_config * @hw: pointer to the hw struct * @mib_type: mib type for the query * @bridgetype: bridge type for the query (remote) * @dcbcfg: store for LLDPDU data * * Query DCB configuration from the Firmware **/ int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, u8 bridgetype, struct i40e_dcbx_config *dcbcfg) { struct i40e_virt_mem mem; int ret = 0; u8 *lldpmib; /* Allocate the LLDPDU */ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); if (ret) return ret; lldpmib = (u8 *)mem.va; ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib, I40E_LLDPDU_SIZE, NULL, NULL, NULL); if (ret) goto free_mem; /* Parse LLDP MIB to get dcb configuration */ ret = i40e_lldp_to_dcb_config(lldpmib, 
dcbcfg); free_mem: i40e_free_virt_mem(hw, &mem); return ret; } /** * i40e_cee_to_dcb_v1_config * @cee_cfg: pointer to CEE v1 response configuration struct * @dcbcfg: DCB configuration struct * * Convert CEE v1 configuration from firmware to DCB configuration **/ static void i40e_cee_to_dcb_v1_config( struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg, struct i40e_dcbx_config *dcbcfg) { u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status); u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); u8 i, tc, err; /* CEE PG data to ETS config */ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; /* Note that the FW creates the oper_prio_tc nibbles reversed * from those in the CEE Priority Group sub-TLV. */ for (i = 0; i < 4; i++) { tc = (u8)((cee_cfg->oper_prio_tc[i] & I40E_CEE_PGID_PRIO_0_MASK) >> I40E_CEE_PGID_PRIO_0_SHIFT); dcbcfg->etscfg.prioritytable[i * 2] = tc; tc = (u8)((cee_cfg->oper_prio_tc[i] & I40E_CEE_PGID_PRIO_1_MASK) >> I40E_CEE_PGID_PRIO_1_SHIFT); dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { /* Map it to next empty TC */ dcbcfg->etscfg.prioritytable[i] = cee_cfg->oper_num_tc - 1; dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; } else { dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; } } /* CEE PFC data to ETS config */ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> I40E_AQC_CEE_APP_STATUS_SHIFT; err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; /* Add APPs if Error is False */ if (!err) { /* CEE operating configuration supports FCoE/iSCSI/FIP only */ dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; /* FCoE APP */ dcbcfg->app[0].priority = (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> I40E_AQC_CEE_APP_FCOE_SHIFT; dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; /* iSCSI APP */ dcbcfg->app[1].priority = (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> I40E_AQC_CEE_APP_ISCSI_SHIFT; dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; /* FIP APP */ dcbcfg->app[2].priority = (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> I40E_AQC_CEE_APP_FIP_SHIFT; dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; } } /** * i40e_cee_to_dcb_config * @cee_cfg: pointer to CEE configuration struct * @dcbcfg: DCB configuration struct * * Convert CEE configuration from firmware to DCB configuration **/ static void i40e_cee_to_dcb_config( struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg, struct i40e_dcbx_config *dcbcfg) { u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status); u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); u8 i, tc, err, sync, oper; /* CEE PG data to ETS config */ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; /* Note that the FW creates the oper_prio_tc nibbles reversed * from those in the CEE Priority Group sub-TLV. 
*/ for (i = 0; i < 4; i++) { tc = (u8)((cee_cfg->oper_prio_tc[i] & I40E_CEE_PGID_PRIO_0_MASK) >> I40E_CEE_PGID_PRIO_0_SHIFT); dcbcfg->etscfg.prioritytable[i * 2] = tc; tc = (u8)((cee_cfg->oper_prio_tc[i] & I40E_CEE_PGID_PRIO_1_MASK) >> I40E_CEE_PGID_PRIO_1_SHIFT); dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc; } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { /* Map it to next empty TC */ dcbcfg->etscfg.prioritytable[i] = cee_cfg->oper_num_tc - 1; dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; } else { dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; } } /* CEE PFC data to ETS config */ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; i = 0; status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >> I40E_AQC_CEE_FCOE_STATUS_SHIFT; err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; /* Add FCoE APP if Error is False and Oper/Sync is True */ if (!err && sync && oper) { /* FCoE APP */ dcbcfg->app[i].priority = (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> I40E_AQC_CEE_APP_FCOE_SHIFT; dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE; i++; } status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >> I40E_AQC_CEE_ISCSI_STATUS_SHIFT; err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; /* Add iSCSI APP if Error is False and Oper/Sync is True */ if (!err && sync && oper) { /* iSCSI APP */ dcbcfg->app[i].priority = (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> I40E_AQC_CEE_APP_ISCSI_SHIFT; dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI; i++; } status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >> I40E_AQC_CEE_FIP_STATUS_SHIFT; err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; oper = (status & I40E_TLV_STATUS_OPER) ? 
1 : 0; /* Add FIP APP if Error is False and Oper/Sync is True */ if (!err && sync && oper) { /* FIP APP */ dcbcfg->app[i].priority = (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> I40E_AQC_CEE_APP_FIP_SHIFT; dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP; i++; } dcbcfg->numapps = i; } /** * i40e_get_ieee_dcb_config * @hw: pointer to the hw struct * * Get IEEE mode DCB configuration from the Firmware **/ static int i40e_get_ieee_dcb_config(struct i40e_hw *hw) { int ret = 0; /* IEEE mode */ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; /* Get Local DCB Config */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, &hw->local_dcbx_config); if (ret) goto out; /* Get Remote DCB Config */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, &hw->remote_dcbx_config); /* Don't treat ENOENT as an error for Remote MIBs */ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) ret = 0; out: return ret; } /** * i40e_get_dcb_config * @hw: pointer to the hw struct * * Get DCB configuration from the Firmware **/ int i40e_get_dcb_config(struct i40e_hw *hw) { struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg; struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg; int ret = 0; /* If Firmware version < v4.33 on X710/XL710, IEEE only */ if ((hw->mac.type == I40E_MAC_XL710) && (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || (hw->aq.fw_maj_ver < 4))) return i40e_get_ieee_dcb_config(hw); /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */ if ((hw->mac.type == I40E_MAC_XL710) && ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) { ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg, sizeof(cee_v1_cfg), NULL); if (!ret) { /* CEE mode */ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; hw->local_dcbx_config.tlv_status = le16_to_cpu(cee_v1_cfg.tlv_status); i40e_cee_to_dcb_v1_config(&cee_v1_cfg, &hw->local_dcbx_config); } } else { ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg, sizeof(cee_cfg), NULL); if (!ret) { /* CEE mode */ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; hw->local_dcbx_config.tlv_status = le32_to_cpu(cee_cfg.tlv_status); i40e_cee_to_dcb_config(&cee_cfg, &hw->local_dcbx_config); } } /* CEE mode not enabled try querying IEEE data */ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) return i40e_get_ieee_dcb_config(hw); if (ret) goto out; /* Get CEE DCB Desired Config */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, &hw->desired_dcbx_config); if (ret) goto out; /* Get Remote DCB Config */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, &hw->remote_dcbx_config); /* Don't treat ENOENT as an error for Remote MIBs */ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) ret = 0; out: return ret; } /** * i40e_init_dcb * @hw: pointer to the hw struct * @enable_mib_change: enable mib change event * * Update DCB configuration from the Firmware **/ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) { struct i40e_lldp_variables lldp_cfg; u8 adminstatus = 0; int ret = 0; if (!hw->func_caps.dcb) return -EOPNOTSUPP; /* Read LLDP NVM area */ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) { u8 offset = 0; if (hw->mac.type == I40E_MAC_XL710) offset = I40E_LLDP_CURRENT_STATUS_XL710_OFFSET; else if (hw->mac.type == I40E_MAC_X722) offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET; else return -EOPNOTSUPP; ret = i40e_read_nvm_module_data(hw, I40E_SR_EMP_SR_SETTINGS_PTR, offset, I40E_LLDP_CURRENT_STATUS_OFFSET, 
I40E_LLDP_CURRENT_STATUS_SIZE, &lldp_cfg.adminstatus); } else { ret = i40e_read_lldp_cfg(hw, &lldp_cfg); } if (ret) return -EBUSY; /* Get the LLDP AdminStatus for the current port */ adminstatus = lldp_cfg.adminstatus >> (hw->port * 4); adminstatus &= 0xF; /* LLDP agent disabled */ if (!adminstatus) { hw->dcbx_status = I40E_DCBX_STATUS_DISABLED; return -EBUSY; } /* Get DCBX status */ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status); if (ret) return ret; /* Check the DCBX Status */ if (hw->dcbx_status == I40E_DCBX_STATUS_DONE || hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) { /* Get current DCBX configuration */ ret = i40e_get_dcb_config(hw); if (ret) return ret; } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { return -EBUSY; } /* Configure the LLDP MIB change event */ if (enable_mib_change) ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL); return ret; } /** * i40e_get_fw_lldp_status * @hw: pointer to the hw struct * @lldp_status: pointer to the status enum * * Get status of FW Link Layer Discovery Protocol (LLDP) Agent. * Status of agent is reported via @lldp_status parameter. **/ int i40e_get_fw_lldp_status(struct i40e_hw *hw, enum i40e_get_fw_lldp_status_resp *lldp_status) { struct i40e_virt_mem mem; u8 *lldpmib; int ret; if (!lldp_status) return -EINVAL; /* Allocate buffer for the LLDPDU */ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); if (ret) return ret; lldpmib = (u8 *)mem.va; ret = i40e_aq_get_lldp_mib(hw, 0, 0, (void *)lldpmib, I40E_LLDPDU_SIZE, NULL, NULL, NULL); if (!ret) { *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED; } else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) { /* MIB is not available yet but the agent is running */ *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED; ret = 0; } else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) { *lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED; ret = 0; } i40e_free_virt_mem(hw, &mem); return ret; } /** * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format * @tlv: Fill the ETS config data in IEEE format * @dcbcfg: Local store which holds the DCB Config * * Prepare IEEE 802.1Qaz ETS CFG TLV **/ static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u8 priority0, priority1, maxtcwilling = 0; struct i40e_dcb_ets_config *etscfg; u16 offset = 0, typelength, i; u8 *buf = tlv->tlvinfo; u32 ouisubtype; typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | I40E_IEEE_ETS_TLV_LENGTH); tlv->typelength = htons(typelength); ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | I40E_IEEE_SUBTYPE_ETS_CFG); tlv->ouisubtype = htonl(ouisubtype); /* First Octet post subtype * -------------------------- * |will-|CBS | Re- | Max | * |ing | |served| TCs | * -------------------------- * |1bit | 1bit|3 bits|3bits| */ etscfg = &dcbcfg->etscfg; if (etscfg->willing) maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT); maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK; buf[offset] = maxtcwilling; /* Move offset to Priority Assignment Table */ offset++; /* Priority Assignment Table (4 octets) * Octets:| 1 | 2 | 3 | 4 | * ----------------------------------------- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| * ----------------------------------------- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| * ----------------------------------------- */ for (i = 0; i < 4; i++) { priority0 = etscfg->prioritytable[i * 2] & 0xF; priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF; buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) | priority1; offset++; } /* TC 
Bandwidth Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) buf[offset++] = etscfg->tcbwtable[i]; /* TSA Assignment Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) buf[offset++] = etscfg->tsatable[i]; } /** * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format * @tlv: Fill ETS Recommended TLV in IEEE format * @dcbcfg: Local store which holds the DCB Config * * Prepare IEEE 802.1Qaz ETS REC TLV **/ static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { struct i40e_dcb_ets_config *etsrec; u16 offset = 0, typelength, i; u8 priority0, priority1; u8 *buf = tlv->tlvinfo; u32 ouisubtype; typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | I40E_IEEE_ETS_TLV_LENGTH); tlv->typelength = htons(typelength); ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | I40E_IEEE_SUBTYPE_ETS_REC); tlv->ouisubtype = htonl(ouisubtype); etsrec = &dcbcfg->etsrec; /* First Octet is reserved */ /* Move offset to Priority Assignment Table */ offset++; /* Priority Assignment Table (4 octets) * Octets:| 1 | 2 | 3 | 4 | * ----------------------------------------- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| * ----------------------------------------- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| * ----------------------------------------- */ for (i = 0; i < 4; i++) { priority0 = etsrec->prioritytable[i * 2] & 0xF; priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF; buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) | priority1; offset++; } /* TC Bandwidth Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) buf[offset++] = etsrec->tcbwtable[i]; /* TSA Assignment Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) buf[offset++] = etsrec->tsatable[i]; } /** * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format * @tlv: Fill PFC TLV in IEEE format * @dcbcfg: Local store to get PFC CFG data * * Prepare IEEE 802.1Qaz PFC CFG TLV **/ static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u8 *buf = tlv->tlvinfo; u32 ouisubtype; u16 typelength; typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | I40E_IEEE_PFC_TLV_LENGTH); tlv->typelength = htons(typelength); ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | I40E_IEEE_SUBTYPE_PFC_CFG); tlv->ouisubtype = htonl(ouisubtype); /* ---------------------------------------- * |will-|MBC | Re- | PFC | PFC Enable | * |ing | |served| cap | | * ----------------------------------------- * |1bit | 1bit|2 bits|4bits| 1 octet | */ if (dcbcfg->pfc.willing) buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT); if (dcbcfg->pfc.mbc) buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT); buf[0] |= dcbcfg->pfc.pfccap & 0xF; buf[1] = dcbcfg->pfc.pfcenable; } /** * i40e_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format * @tlv: Fill APP TLV in IEEE format * @dcbcfg: Local store to get APP CFG data 
* * Prepare IEEE 802.1Qaz APP CFG TLV **/ static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { u16 typelength, length, offset = 0; u8 priority, selector, i = 0; u8 *buf = tlv->tlvinfo; u32 ouisubtype; /* No APP TLVs then just return */ if (dcbcfg->numapps == 0) return; ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | I40E_IEEE_SUBTYPE_APP_PRI); tlv->ouisubtype = htonl(ouisubtype); /* Move offset to App Priority Table */ offset++; /* Application Priority Table (3 octets) * Octets:| 1 | 2 | 3 | * ----------------------------------------- * |Priority|Rsrvd| Sel | Protocol ID | * ----------------------------------------- * Bits:|23 21|20 19|18 16|15 0| * ----------------------------------------- */ while (i < dcbcfg->numapps) { priority = dcbcfg->app[i].priority & 0x7; selector = dcbcfg->app[i].selector & 0x7; buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector; buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF; buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF; /* Move to next app */ offset += 3; i++; if (i >= I40E_DCBX_MAX_APPS) break; } /* length includes size of ouisubtype + 1 reserved + 3*numapps */ length = sizeof(tlv->ouisubtype) + 1 + (i * 3); typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | (length & 0x1FF)); tlv->typelength = htons(typelength); } /** * i40e_add_dcb_tlv - Add all IEEE TLVs * @tlv: pointer to org tlv * @dcbcfg: pointer to modified dcbx config structure * * @tlvid: tlv id to be added * add tlv information **/ static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg, u16 tlvid) { switch (tlvid) { case I40E_IEEE_TLV_ID_ETS_CFG: i40e_add_ieee_ets_tlv(tlv, dcbcfg); break; case I40E_IEEE_TLV_ID_ETS_REC: i40e_add_ieee_etsrec_tlv(tlv, dcbcfg); break; case I40E_IEEE_TLV_ID_PFC_CFG: i40e_add_ieee_pfc_tlv(tlv, dcbcfg); break; case I40E_IEEE_TLV_ID_APP_PRI: i40e_add_ieee_app_pri_tlv(tlv, dcbcfg); break; default: break; } } /** * i40e_set_dcb_config - Set the local LLDP MIB to FW * @hw: pointer to the hw struct * * Set DCB configuration to the Firmware **/ int i40e_set_dcb_config(struct i40e_hw *hw) { struct i40e_dcbx_config *dcbcfg; struct i40e_virt_mem mem; u8 mib_type, *lldpmib; u16 miblen; int ret; /* update the hw local config */ dcbcfg = &hw->local_dcbx_config; /* Allocate the LLDPDU */ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); if (ret) return ret; mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB; if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) { mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS << SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT; } lldpmib = (u8 *)mem.va; i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg); ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL); i40e_free_virt_mem(hw, &mem); return ret; } /** * i40e_dcb_config_to_lldp - Convert Dcbconfig to MIB format * @lldpmib: pointer to mib to be output * @miblen: pointer to u16 for length of lldpmib * @dcbcfg: store for LLDPDU data * * send DCB configuration to FW **/ int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, struct i40e_dcbx_config *dcbcfg) { u16 length, offset = 0, tlvid, typelength; struct i40e_lldp_org_tlv *tlv; tlv = (struct i40e_lldp_org_tlv *)lldpmib; tlvid = I40E_TLV_ID_START; do { i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++); typelength = ntohs(tlv->typelength); length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT); if (length) offset += length + I40E_IEEE_TLV_HEADER_LENGTH; /* END TLV 
or beyond LLDPDU size */ if (tlvid >= I40E_TLV_ID_END_OF_LLDPPDU || offset >= I40E_LLDPDU_SIZE) break; /* Move to next TLV */ if (length) tlv = (struct i40e_lldp_org_tlv *)((char *)tlv + sizeof(tlv->typelength) + length); } while (tlvid < I40E_TLV_ID_END_OF_LLDPPDU); *miblen = offset; return 0; } /** * i40e_dcb_hw_rx_fifo_config * @hw: pointer to the hw struct * @ets_mode: Strict Priority or Round Robin mode * @non_ets_mode: Strict Priority or Round Robin * @max_exponent: Exponent to calculate max refill credits * @lltc_map: Low latency TC bitmap * * Configure HW Rx FIFO as part of DCB configuration. **/ void i40e_dcb_hw_rx_fifo_config(struct i40e_hw *hw, enum i40e_dcb_arbiter_mode ets_mode, enum i40e_dcb_arbiter_mode non_ets_mode, u32 max_exponent, u8 lltc_map) { u32 reg = rd32(hw, I40E_PRTDCB_RETSC); reg &= ~I40E_PRTDCB_RETSC_ETS_MODE_MASK; reg |= ((u32)ets_mode << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) & I40E_PRTDCB_RETSC_ETS_MODE_MASK; reg &= ~I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK; reg |= ((u32)non_ets_mode << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) & I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK; reg &= ~I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK; reg |= (max_exponent << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) & I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK; reg &= ~I40E_PRTDCB_RETSC_LLTC_MASK; reg |= (lltc_map << I40E_PRTDCB_RETSC_LLTC_SHIFT) & I40E_PRTDCB_RETSC_LLTC_MASK; wr32(hw, I40E_PRTDCB_RETSC, reg); } /** * i40e_dcb_hw_rx_cmd_monitor_config * @hw: pointer to the hw struct * @num_tc: Total number of traffic class * @num_ports: Total number of ports on device * * Configure HW Rx command monitor as part of DCB configuration. **/ void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw, u8 num_tc, u8 num_ports) { u32 threshold; u32 fifo_size; u32 reg; /* Set the threshold and fifo_size based on number of ports */ switch (num_ports) { case 1: threshold = I40E_DCB_1_PORT_THRESHOLD; fifo_size = I40E_DCB_1_PORT_FIFO_SIZE; break; case 2: if (num_tc > 4) { threshold = I40E_DCB_2_PORT_THRESHOLD_HIGH_NUM_TC; fifo_size = I40E_DCB_2_PORT_FIFO_SIZE_HIGH_NUM_TC; } else { threshold = I40E_DCB_2_PORT_THRESHOLD_LOW_NUM_TC; fifo_size = I40E_DCB_2_PORT_FIFO_SIZE_LOW_NUM_TC; } break; case 4: if (num_tc > 4) { threshold = I40E_DCB_4_PORT_THRESHOLD_HIGH_NUM_TC; fifo_size = I40E_DCB_4_PORT_FIFO_SIZE_HIGH_NUM_TC; } else { threshold = I40E_DCB_4_PORT_THRESHOLD_LOW_NUM_TC; fifo_size = I40E_DCB_4_PORT_FIFO_SIZE_LOW_NUM_TC; } break; default: i40e_debug(hw, I40E_DEBUG_DCB, "Invalid num_ports %u.\n", (u32)num_ports); return; } /* The hardware manual describes setting up of I40E_PRT_SWR_PM_THR * based on the number of ports and traffic classes for a given port as * part of DCB configuration. */ reg = rd32(hw, I40E_PRT_SWR_PM_THR); reg &= ~I40E_PRT_SWR_PM_THR_THRESHOLD_MASK; reg |= (threshold << I40E_PRT_SWR_PM_THR_THRESHOLD_SHIFT) & I40E_PRT_SWR_PM_THR_THRESHOLD_MASK; wr32(hw, I40E_PRT_SWR_PM_THR, reg); reg = rd32(hw, I40E_PRTDCB_RPPMC); reg &= ~I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK; reg |= (fifo_size << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) & I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK; wr32(hw, I40E_PRTDCB_RPPMC, reg); } /** * i40e_dcb_hw_pfc_config * @hw: pointer to the hw struct * @pfc_en: Bitmap of PFC enabled priorities * @prio_tc: priority to tc assignment indexed by priority * * Configure HW Priority Flow Controller as part of DCB configuration. 
**/ void i40e_dcb_hw_pfc_config(struct i40e_hw *hw, u8 pfc_en, u8 *prio_tc) { u16 refresh_time = (u16)I40E_DEFAULT_PAUSE_TIME / 2; u32 link_speed = hw->phy.link_info.link_speed; u8 first_pfc_prio = 0; u8 num_pfc_tc = 0; u8 tc2pfc = 0; u32 reg; u8 i; /* Get Number of PFC TCs and TC2PFC map */ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { if (pfc_en & BIT(i)) { if (!first_pfc_prio) first_pfc_prio = i; /* Set bit for the PFC TC */ tc2pfc |= BIT(prio_tc[i]); num_pfc_tc++; } } switch (link_speed) { case I40E_LINK_SPEED_10GB: reg = rd32(hw, I40E_PRTDCB_MFLCN); reg |= BIT(I40E_PRTDCB_MFLCN_DPF_SHIFT) & I40E_PRTDCB_MFLCN_DPF_MASK; reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK; reg &= ~I40E_PRTDCB_MFLCN_RPFCE_MASK; if (pfc_en) { reg |= BIT(I40E_PRTDCB_MFLCN_RPFCM_SHIFT) & I40E_PRTDCB_MFLCN_RPFCM_MASK; reg |= ((u32)pfc_en << I40E_PRTDCB_MFLCN_RPFCE_SHIFT) & I40E_PRTDCB_MFLCN_RPFCE_MASK; } wr32(hw, I40E_PRTDCB_MFLCN, reg); reg = rd32(hw, I40E_PRTDCB_FCCFG); reg &= ~I40E_PRTDCB_FCCFG_TFCE_MASK; if (pfc_en) reg |= (I40E_DCB_PFC_ENABLED << I40E_PRTDCB_FCCFG_TFCE_SHIFT) & I40E_PRTDCB_FCCFG_TFCE_MASK; wr32(hw, I40E_PRTDCB_FCCFG, reg); /* FCTTV and FCRTV to be set by default */ break; case I40E_LINK_SPEED_40GB: reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP); reg &= ~I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_MASK; wr32(hw, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP, reg); reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP); reg &= ~I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_MASK; reg |= BIT(I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_SHIFT) & I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_MASK; wr32(hw, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP, reg); reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE); reg &= ~I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK; reg |= ((u32)pfc_en << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) & I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK; wr32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE, reg); reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE); reg &= ~I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK; reg |= ((u32)pfc_en << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) & I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK; wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE, reg); for (i = 0; i < I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX; i++) { reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(i)); reg &= ~I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK; if (pfc_en) { reg |= ((u32)refresh_time << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) & I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK; } wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(i), reg); } /* PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA default value is 0xFFFF * for all user priorities */ break; } reg = rd32(hw, I40E_PRTDCB_TC2PFC); reg &= ~I40E_PRTDCB_TC2PFC_TC2PFC_MASK; reg |= ((u32)tc2pfc << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) & I40E_PRTDCB_TC2PFC_TC2PFC_MASK; wr32(hw, I40E_PRTDCB_TC2PFC, reg); reg = rd32(hw, I40E_PRTDCB_RUP); reg &= ~I40E_PRTDCB_RUP_NOVLANUP_MASK; reg |= ((u32)first_pfc_prio << I40E_PRTDCB_RUP_NOVLANUP_SHIFT) & I40E_PRTDCB_RUP_NOVLANUP_MASK; wr32(hw, I40E_PRTDCB_RUP, reg); reg = rd32(hw, I40E_PRTDCB_TDPMC); reg &= ~I40E_PRTDCB_TDPMC_TCPM_MODE_MASK; if (num_pfc_tc > I40E_DCB_PFC_FORCED_NUM_TC) { reg |= BIT(I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT) & I40E_PRTDCB_TDPMC_TCPM_MODE_MASK; } wr32(hw, I40E_PRTDCB_TDPMC, reg); reg = rd32(hw, I40E_PRTDCB_TCPMC); reg &= ~I40E_PRTDCB_TCPMC_TCPM_MODE_MASK; if (num_pfc_tc > I40E_DCB_PFC_FORCED_NUM_TC) { reg |= BIT(I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT) & I40E_PRTDCB_TCPMC_TCPM_MODE_MASK; } wr32(hw, I40E_PRTDCB_TCPMC, reg); } /** * 
i40e_dcb_hw_set_num_tc * @hw: pointer to the hw struct * @num_tc: number of traffic classes * * Configure number of traffic classes in HW **/ void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc) { u32 reg = rd32(hw, I40E_PRTDCB_GENC); reg &= ~I40E_PRTDCB_GENC_NUMTC_MASK; reg |= ((u32)num_tc << I40E_PRTDCB_GENC_NUMTC_SHIFT) & I40E_PRTDCB_GENC_NUMTC_MASK; wr32(hw, I40E_PRTDCB_GENC, reg); } /** * i40e_dcb_hw_get_num_tc * @hw: pointer to the hw struct * * Returns number of traffic classes configured in HW **/ u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw) { u32 reg = rd32(hw, I40E_PRTDCB_GENC); return (u8)((reg & I40E_PRTDCB_GENC_NUMTC_MASK) >> I40E_PRTDCB_GENC_NUMTC_SHIFT); } /** * i40e_dcb_hw_rx_ets_bw_config * @hw: pointer to the hw struct * @bw_share: Bandwidth share indexed per traffic class * @mode: Strict Priority or Round Robin mode between UP sharing same * traffic class * @prio_type: TC is ETS enabled or strict priority * * Configure HW Rx ETS bandwidth as part of DCB configuration. **/ void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share, u8 *mode, u8 *prio_type) { u32 reg; u8 i; for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) { reg = rd32(hw, I40E_PRTDCB_RETSTCC(i)); reg &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK | I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK | I40E_PRTDCB_RETSTCC_ETSTC_SHIFT); reg |= ((u32)bw_share[i] << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) & I40E_PRTDCB_RETSTCC_BWSHARE_MASK; reg |= ((u32)mode[i] << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) & I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK; reg |= ((u32)prio_type[i] << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) & I40E_PRTDCB_RETSTCC_ETSTC_MASK; wr32(hw, I40E_PRTDCB_RETSTCC(i), reg); } } /** * i40e_dcb_hw_rx_up2tc_config * @hw: pointer to the hw struct * @prio_tc: priority to tc assignment indexed by priority * * Configure HW Rx UP2TC map as part of DCB configuration. **/ void i40e_dcb_hw_rx_up2tc_config(struct i40e_hw *hw, u8 *prio_tc) { u32 reg = rd32(hw, I40E_PRTDCB_RUP2TC); #define I40E_UP2TC_REG(val, i) \ (((val) << I40E_PRTDCB_RUP2TC_UP##i##TC_SHIFT) & \ I40E_PRTDCB_RUP2TC_UP##i##TC_MASK) reg |= I40E_UP2TC_REG(prio_tc[0], 0); reg |= I40E_UP2TC_REG(prio_tc[1], 1); reg |= I40E_UP2TC_REG(prio_tc[2], 2); reg |= I40E_UP2TC_REG(prio_tc[3], 3); reg |= I40E_UP2TC_REG(prio_tc[4], 4); reg |= I40E_UP2TC_REG(prio_tc[5], 5); reg |= I40E_UP2TC_REG(prio_tc[6], 6); reg |= I40E_UP2TC_REG(prio_tc[7], 7); wr32(hw, I40E_PRTDCB_RUP2TC, reg); } /** * i40e_dcb_hw_calculate_pool_sizes - configure dcb pool sizes * @hw: pointer to the hw struct * @num_ports: Number of available ports on the device * @eee_enabled: EEE enabled for the given port * @pfc_en: Bit map of PFC enabled traffic classes * @mfs_tc: Array of max frame size for each traffic class * @pb_cfg: pointer to packet buffer configuration * * Calculate the shared and dedicated per TC pool sizes, * watermarks and threshold values. 
**/ void i40e_dcb_hw_calculate_pool_sizes(struct i40e_hw *hw, u8 num_ports, bool eee_enabled, u8 pfc_en, u32 *mfs_tc, struct i40e_rx_pb_config *pb_cfg) { u32 pool_size[I40E_MAX_TRAFFIC_CLASS]; u32 high_wm[I40E_MAX_TRAFFIC_CLASS]; u32 low_wm[I40E_MAX_TRAFFIC_CLASS]; u32 total_pool_size = 0; int shared_pool_size; /* Need signed variable */ u32 port_pb_size; u32 mfs_max = 0; u32 pcirtt; u8 i; /* Get the MFS(max) for the port */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (mfs_tc[i] > mfs_max) mfs_max = mfs_tc[i]; } pcirtt = I40E_BT2B(I40E_PCIRTT_LINK_SPEED_10G); /* Calculate effective Rx PB size per port */ port_pb_size = I40E_DEVICE_RPB_SIZE / num_ports; if (eee_enabled) port_pb_size -= I40E_BT2B(I40E_EEE_TX_LPI_EXIT_TIME); port_pb_size -= mfs_max; /* Step 1 Calculating tc pool/shared pool sizes and watermarks */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (pfc_en & BIT(i)) { low_wm[i] = (I40E_DCB_WATERMARK_START_FACTOR * mfs_tc[i]) + pcirtt; high_wm[i] = low_wm[i]; high_wm[i] += ((mfs_max > I40E_MAX_FRAME_SIZE) ? mfs_max : I40E_MAX_FRAME_SIZE); pool_size[i] = high_wm[i]; pool_size[i] += I40E_BT2B(I40E_STD_DV_TC(mfs_max, mfs_tc[i])); } else { low_wm[i] = 0; pool_size[i] = (I40E_DCB_WATERMARK_START_FACTOR * mfs_tc[i]) + pcirtt; high_wm[i] = pool_size[i]; } total_pool_size += pool_size[i]; } shared_pool_size = port_pb_size - total_pool_size; if (shared_pool_size > 0) { pb_cfg->shared_pool_size = shared_pool_size; pb_cfg->shared_pool_high_wm = shared_pool_size; pb_cfg->shared_pool_low_wm = 0; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { pb_cfg->shared_pool_low_thresh[i] = 0; pb_cfg->shared_pool_high_thresh[i] = shared_pool_size; pb_cfg->tc_pool_size[i] = pool_size[i]; pb_cfg->tc_pool_high_wm[i] = high_wm[i]; pb_cfg->tc_pool_low_wm[i] = low_wm[i]; } } else { i40e_debug(hw, I40E_DEBUG_DCB, "The shared pool size for the port is negative %d.\n", shared_pool_size); } } /** * i40e_dcb_hw_rx_pb_config * @hw: pointer to the hw struct * @old_pb_cfg: Existing Rx Packet buffer configuration * @new_pb_cfg: New Rx Packet buffer configuration * * Program the Rx Packet Buffer registers. **/ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, struct i40e_rx_pb_config *old_pb_cfg, struct i40e_rx_pb_config *new_pb_cfg) { u32 old_val; u32 new_val; u32 reg; u8 i; /* The Rx Packet buffer register programming needs to be done in a * certain order and the following code is based on that * requirement. */ /* Program the shared pool low water mark per port if decreasing */ old_val = old_pb_cfg->shared_pool_low_wm; new_val = new_pb_cfg->shared_pool_low_wm; if (new_val < old_val) { reg = rd32(hw, I40E_PRTRPB_SLW); reg &= ~I40E_PRTRPB_SLW_SLW_MASK; reg |= (new_val << I40E_PRTRPB_SLW_SLW_SHIFT) & I40E_PRTRPB_SLW_SLW_MASK; wr32(hw, I40E_PRTRPB_SLW, reg); } /* Program the shared pool low threshold and tc pool * low water mark per TC that are decreasing. 
*/ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { old_val = old_pb_cfg->shared_pool_low_thresh[i]; new_val = new_pb_cfg->shared_pool_low_thresh[i]; if (new_val < old_val) { reg = rd32(hw, I40E_PRTRPB_SLT(i)); reg &= ~I40E_PRTRPB_SLT_SLT_TCN_MASK; reg |= (new_val << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) & I40E_PRTRPB_SLT_SLT_TCN_MASK; wr32(hw, I40E_PRTRPB_SLT(i), reg); } old_val = old_pb_cfg->tc_pool_low_wm[i]; new_val = new_pb_cfg->tc_pool_low_wm[i]; if (new_val < old_val) { reg = rd32(hw, I40E_PRTRPB_DLW(i)); reg &= ~I40E_PRTRPB_DLW_DLW_TCN_MASK; reg |= (new_val << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) & I40E_PRTRPB_DLW_DLW_TCN_MASK; wr32(hw, I40E_PRTRPB_DLW(i), reg); } } /* Program the shared pool high water mark per port if decreasing */ old_val = old_pb_cfg->shared_pool_high_wm; new_val = new_pb_cfg->shared_pool_high_wm; if (new_val < old_val) { reg = rd32(hw, I40E_PRTRPB_SHW); reg &= ~I40E_PRTRPB_SHW_SHW_MASK; reg |= (new_val << I40E_PRTRPB_SHW_SHW_SHIFT) & I40E_PRTRPB_SHW_SHW_MASK; wr32(hw, I40E_PRTRPB_SHW, reg); } /* Program the shared pool high threshold and tc pool * high water mark per TC that are decreasing. */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { old_val = old_pb_cfg->shared_pool_high_thresh[i]; new_val = new_pb_cfg->shared_pool_high_thresh[i]; if (new_val < old_val) { reg = rd32(hw, I40E_PRTRPB_SHT(i)); reg &= ~I40E_PRTRPB_SHT_SHT_TCN_MASK; reg |= (new_val << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) & I40E_PRTRPB_SHT_SHT_TCN_MASK; wr32(hw, I40E_PRTRPB_SHT(i), reg); } old_val = old_pb_cfg->tc_pool_high_wm[i]; new_val = new_pb_cfg->tc_pool_high_wm[i]; if (new_val < old_val) { reg = rd32(hw, I40E_PRTRPB_DHW(i)); reg &= ~I40E_PRTRPB_DHW_DHW_TCN_MASK; reg |= (new_val << I40E_PRTRPB_DHW_DHW_TCN_SHIFT) & I40E_PRTRPB_DHW_DHW_TCN_MASK; wr32(hw, I40E_PRTRPB_DHW(i), reg); } } /* Write Dedicated Pool Sizes per TC */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { new_val = new_pb_cfg->tc_pool_size[i]; reg = rd32(hw, I40E_PRTRPB_DPS(i)); reg &= ~I40E_PRTRPB_DPS_DPS_TCN_MASK; reg |= (new_val << I40E_PRTRPB_DPS_DPS_TCN_SHIFT) & I40E_PRTRPB_DPS_DPS_TCN_MASK; wr32(hw, I40E_PRTRPB_DPS(i), reg); } /* Write Shared Pool Size per port */ new_val = new_pb_cfg->shared_pool_size; reg = rd32(hw, I40E_PRTRPB_SPS); reg &= ~I40E_PRTRPB_SPS_SPS_MASK; reg |= (new_val << I40E_PRTRPB_SPS_SPS_SHIFT) & I40E_PRTRPB_SPS_SPS_MASK; wr32(hw, I40E_PRTRPB_SPS, reg); /* Program the shared pool low water mark per port if increasing */ old_val = old_pb_cfg->shared_pool_low_wm; new_val = new_pb_cfg->shared_pool_low_wm; if (new_val > old_val) { reg = rd32(hw, I40E_PRTRPB_SLW); reg &= ~I40E_PRTRPB_SLW_SLW_MASK; reg |= (new_val << I40E_PRTRPB_SLW_SLW_SHIFT) & I40E_PRTRPB_SLW_SLW_MASK; wr32(hw, I40E_PRTRPB_SLW, reg); } /* Program the shared pool low threshold and tc pool * low water mark per TC that are increasing. 
*/ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { old_val = old_pb_cfg->shared_pool_low_thresh[i]; new_val = new_pb_cfg->shared_pool_low_thresh[i]; if (new_val > old_val) { reg = rd32(hw, I40E_PRTRPB_SLT(i)); reg &= ~I40E_PRTRPB_SLT_SLT_TCN_MASK; reg |= (new_val << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) & I40E_PRTRPB_SLT_SLT_TCN_MASK; wr32(hw, I40E_PRTRPB_SLT(i), reg); } old_val = old_pb_cfg->tc_pool_low_wm[i]; new_val = new_pb_cfg->tc_pool_low_wm[i]; if (new_val > old_val) { reg = rd32(hw, I40E_PRTRPB_DLW(i)); reg &= ~I40E_PRTRPB_DLW_DLW_TCN_MASK; reg |= (new_val << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) & I40E_PRTRPB_DLW_DLW_TCN_MASK; wr32(hw, I40E_PRTRPB_DLW(i), reg); } } /* Program the shared pool high water mark per port if increasing */ old_val = old_pb_cfg->shared_pool_high_wm; new_val = new_pb_cfg->shared_pool_high_wm; if (new_val > old_val) { reg = rd32(hw, I40E_PRTRPB_SHW); reg &= ~I40E_PRTRPB_SHW_SHW_MASK; reg |= (new_val << I40E_PRTRPB_SHW_SHW_SHIFT) & I40E_PRTRPB_SHW_SHW_MASK; wr32(hw, I40E_PRTRPB_SHW, reg); } /* Program the shared pool high threshold and tc pool * high water mark per TC that are increasing. */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { old_val = old_pb_cfg->shared_pool_high_thresh[i]; new_val = new_pb_cfg->shared_pool_high_thresh[i]; if (new_val > old_val) { reg = rd32(hw, I40E_PRTRPB_SHT(i)); reg &= ~I40E_PRTRPB_SHT_SHT_TCN_MASK; reg |= (new_val << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) & I40E_PRTRPB_SHT_SHT_TCN_MASK; wr32(hw, I40E_PRTRPB_SHT(i), reg); } old_val = old_pb_cfg->tc_pool_high_wm[i]; new_val = new_pb_cfg->tc_pool_high_wm[i]; if (new_val > old_val) { reg = rd32(hw, I40E_PRTRPB_DHW(i)); reg &= ~I40E_PRTRPB_DHW_DHW_TCN_MASK; reg |= (new_val << I40E_PRTRPB_DHW_DHW_TCN_SHIFT) & I40E_PRTRPB_DHW_DHW_TCN_MASK; wr32(hw, I40E_PRTRPB_DHW(i), reg); } } } /** * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM * @hw: pointer to the HW structure * @lldp_cfg: pointer to hold lldp configuration variables * @module: address of the module pointer * @word_offset: offset of LLDP configuration * * Reads the LLDP configuration data from NVM using passed addresses **/ static int _i40e_read_lldp_cfg(struct i40e_hw *hw, struct i40e_lldp_variables *lldp_cfg, u8 module, u32 word_offset) { u32 address, offset = (2 * word_offset); __le16 raw_mem; int ret; u16 mem; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret) return ret; ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(raw_mem), &raw_mem, true, NULL); i40e_release_nvm(hw); if (ret) return ret; mem = le16_to_cpu(raw_mem); /* Check if this pointer needs to be read in word size or 4K sector * units. 
*/ if (mem & I40E_PTR_TYPE) address = (0x7FFF & mem) * 4096; else address = (0x7FFF & mem) * 2; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret) goto err_lldp_cfg; ret = i40e_aq_read_nvm(hw, module, offset, sizeof(raw_mem), &raw_mem, true, NULL); i40e_release_nvm(hw); if (ret) return ret; mem = le16_to_cpu(raw_mem); offset = mem + word_offset; offset *= 2; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret) goto err_lldp_cfg; ret = i40e_aq_read_nvm(hw, 0, address + offset, sizeof(struct i40e_lldp_variables), lldp_cfg, true, NULL); i40e_release_nvm(hw); err_lldp_cfg: return ret; } /** * i40e_read_lldp_cfg - read LLDP Configuration data from NVM * @hw: pointer to the HW structure * @lldp_cfg: pointer to hold lldp configuration variables * * Reads the LLDP configuration data from NVM **/ int i40e_read_lldp_cfg(struct i40e_hw *hw, struct i40e_lldp_variables *lldp_cfg) { int ret = 0; u32 mem; if (!lldp_cfg) return -EINVAL; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret) return ret; ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem), &mem, true, NULL); i40e_release_nvm(hw); if (ret) return ret; /* Read a bit that holds information whether we are running flat or * structured NVM image. Flat image has LLDP configuration in shadow * ram, so there is a need to pass different addresses for both cases. */ if (mem & I40E_SR_NVM_MAP_STRUCTURE_TYPE) { /* Flat NVM case */ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_SR_EMP_MODULE_PTR, I40E_SR_LLDP_CFG_PTR); } else { /* Good old structured NVM image */ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_EMP_MODULE_PTR, I40E_NVM_LLDP_CFG_PTR); } return ret; }
linux-master
drivers/net/ethernet/intel/i40e/i40e_dcb.c
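i40e_lldp_to_dcb_config() above walks the LLDPDU one TLV at a time: each TLV begins with a 16-bit header carrying a 7-bit type and a 9-bit length, and the walk stops at the END TLV or once the running offset passes the LLDPDU size. The standalone userspace sketch below illustrates the same walk; the LLDP_TLV_* macros are local stand-ins assumed to mirror the driver's I40E_LLDP_TLV_* masks and shifts, and lldpdu is expected to point just past the Ethernet header, as in the driver.

/* Hedged sketch (userspace C) of the TLV walk performed by
 * i40e_lldp_to_dcb_config(). The macros below are local stand-ins for the
 * driver's I40E_LLDP_TLV_* definitions: 7-bit type, 9-bit length.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define LLDP_TLV_TYPE_SHIFT	9
#define LLDP_TLV_TYPE_MASK	(0x7FU << LLDP_TLV_TYPE_SHIFT)
#define LLDP_TLV_LEN_MASK	0x01FFU
#define LLDP_TLV_TYPE_END	0	/* End Of LLDPDU TLV */

static void walk_lldp_tlvs(const uint8_t *lldpdu, size_t size)
{
	size_t offset = 0;

	while (offset + 2 <= size) {
		unsigned int type, length;
		uint16_t typelength;

		/* The TLV header is big-endian on the wire. */
		memcpy(&typelength, lldpdu + offset, sizeof(typelength));
		typelength = ntohs(typelength);

		type = (typelength & LLDP_TLV_TYPE_MASK) >> LLDP_TLV_TYPE_SHIFT;
		length = typelength & LLDP_TLV_LEN_MASK;

		/* Stop at the END TLV or if the value would run past the
		 * buffer, mirroring the driver's LLDPDU-size check.
		 */
		if (type == LLDP_TLV_TYPE_END || offset + 2 + length > size)
			break;

		printf("TLV type %u, length %u at offset %zu\n",
		       type, length, offset);

		/* 2-byte header plus the value, then on to the next TLV. */
		offset += 2 + length;
	}
}

In the driver the same loop dispatches organizationally specific TLVs (I40E_TLV_TYPE_ORG) to i40e_parse_org_tlv(), which keys off the OUI to select either the IEEE 802.1Qaz or the CEE parser.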
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2018 Intel Corporation. */ #include <linux/bpf_trace.h> #include <linux/stringify.h> #include <net/xdp_sock_drv.h> #include <net/xdp.h> #include "i40e.h" #include "i40e_txrx_common.h" #include "i40e_xsk.h" void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring) { memset(rx_ring->rx_bi_zc, 0, sizeof(*rx_ring->rx_bi_zc) * rx_ring->count); } static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx) { return &rx_ring->rx_bi_zc[idx]; } /** * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer * @rx_ring: Current rx ring * @pool_present: is pool for XSK present * * Try allocating memory and return ENOMEM, if failed to allocate. * If allocation was successful, substitute buffer with allocated one. * Returns 0 on success, negative on failure */ static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present) { size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) : sizeof(*rx_ring->rx_bi); void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL); if (!sw_ring) return -ENOMEM; if (pool_present) { kfree(rx_ring->rx_bi); rx_ring->rx_bi = NULL; rx_ring->rx_bi_zc = sw_ring; } else { kfree(rx_ring->rx_bi_zc); rx_ring->rx_bi_zc = NULL; rx_ring->rx_bi = sw_ring; } return 0; } /** * i40e_realloc_rx_bi_zc - reallocate rx SW rings * @vsi: Current VSI * @zc: is zero copy set * * Reallocate buffer for rx_rings that might be used by XSK. * XDP requires more memory, than rx_buf provides. * Returns 0 on success, negative on failure */ int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc) { struct i40e_ring *rx_ring; unsigned long q; for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) { rx_ring = vsi->rx_rings[q]; if (i40e_realloc_rx_xdp_bi(rx_ring, zc)) return -ENOMEM; } return 0; } /** * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a * certain ring/qid * @vsi: Current VSI * @pool: buffer pool * @qid: Rx ring to associate buffer pool with * * Returns 0 on success, <0 on failure **/ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) { struct net_device *netdev = vsi->netdev; bool if_running; int err; if (vsi->type != I40E_VSI_MAIN) return -EINVAL; if (qid >= vsi->num_queue_pairs) return -EINVAL; if (qid >= netdev->real_num_rx_queues || qid >= netdev->real_num_tx_queues) return -EINVAL; err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR); if (err) return err; set_bit(qid, vsi->af_xdp_zc_qps); if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); if (if_running) { err = i40e_queue_pair_disable(vsi, qid); if (err) return err; err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true); if (err) return err; err = i40e_queue_pair_enable(vsi, qid); if (err) return err; /* Kick start the NAPI context so that receiving will start */ err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX); if (err) return err; } return 0; } /** * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a * certain ring/qid * @vsi: Current VSI * @qid: Rx ring to associate buffer pool with * * Returns 0 on success, <0 on failure **/ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid) { struct net_device *netdev = vsi->netdev; struct xsk_buff_pool *pool; bool if_running; int err; pool = xsk_get_pool_from_qid(netdev, qid); if (!pool) return -EINVAL; if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); if (if_running) { err = i40e_queue_pair_disable(vsi, qid); if (err) return err; } 
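/* With the queue pair quiesced above (when the interface was running), the
 * zero-copy state can be torn down safely: the qid is cleared from
 * af_xdp_zc_qps and the pool's DMA mappings are released before the rings
 * are reallocated for the regular buffer path and re-enabled.
 */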
clear_bit(qid, vsi->af_xdp_zc_qps); xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR); if (if_running) { err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false); if (err) return err; err = i40e_queue_pair_enable(vsi, qid); if (err) return err; } return 0; } /** * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from * a ring/qid * @vsi: Current VSI * @pool: Buffer pool to enable/associate to a ring, or NULL to disable * @qid: Rx ring to (dis)associate buffer pool (from)to * * This function enables or disables a buffer pool to a certain ring. * * Returns 0 on success, <0 on failure **/ int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) { return pool ? i40e_xsk_pool_enable(vsi, pool, qid) : i40e_xsk_pool_disable(vsi, qid); } /** * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff * @rx_ring: Rx ring * @xdp: xdp_buff used as input to the XDP program * @xdp_prog: XDP program to run * * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR} **/ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) { int err, result = I40E_XDP_PASS; struct i40e_ring *xdp_ring; u32 act; act = bpf_prog_run_xdp(xdp_prog, xdp); if (likely(act == XDP_REDIRECT)) { err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); if (!err) return I40E_XDP_REDIR; if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) result = I40E_XDP_EXIT; else result = I40E_XDP_CONSUMED; goto out_failure; } switch (act) { case XDP_PASS: break; case XDP_TX: xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); if (result == I40E_XDP_CONSUMED) goto out_failure; break; case XDP_DROP: result = I40E_XDP_CONSUMED; break; default: bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: result = I40E_XDP_CONSUMED; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); } return result; } bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count) { u16 ntu = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; struct xdp_buff **xdp; u32 nb_buffs, i; dma_addr_t dma; rx_desc = I40E_RX_DESC(rx_ring, ntu); xdp = i40e_rx_bi(rx_ring, ntu); nb_buffs = min_t(u16, count, rx_ring->count - ntu); nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs); if (!nb_buffs) return false; i = nb_buffs; while (i--) { dma = xsk_buff_xdp_get_dma(*xdp); rx_desc->read.pkt_addr = cpu_to_le64(dma); rx_desc->read.hdr_addr = 0; rx_desc++; xdp++; } ntu += nb_buffs; if (ntu == rx_ring->count) { rx_desc = I40E_RX_DESC(rx_ring, 0); ntu = 0; } /* clear the status bits for the next_to_use descriptor */ rx_desc->wb.qword1.status_error_len = 0; i40e_release_rx_desc(rx_ring, ntu); return count == nb_buffs; } /** * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer * @rx_ring: Rx ring * @xdp: xdp_buff * * This functions allocates a new skb from a zero-copy Rx buffer. * * Returns the skb, or NULL on failure. 
**/ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) { unsigned int totalsize = xdp->data_end - xdp->data_meta; unsigned int metasize = xdp->data - xdp->data_meta; struct skb_shared_info *sinfo = NULL; struct sk_buff *skb; u32 nr_frags = 0; if (unlikely(xdp_buff_has_frags(xdp))) { sinfo = xdp_get_shared_info_from_buff(xdp); nr_frags = sinfo->nr_frags; } net_prefetch(xdp->data_meta); /* allocate a skb to store the frags */ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto out; memcpy(__skb_put(skb, totalsize), xdp->data_meta, ALIGN(totalsize, sizeof(long))); if (metasize) { skb_metadata_set(skb, metasize); __skb_pull(skb, metasize); } if (likely(!xdp_buff_has_frags(xdp))) goto out; for (int i = 0; i < nr_frags; i++) { struct skb_shared_info *skinfo = skb_shinfo(skb); skb_frag_t *frag = &sinfo->frags[i]; struct page *page; void *addr; page = dev_alloc_page(); if (!page) { dev_kfree_skb(skb); return NULL; } addr = page_to_virt(page); memcpy(addr, skb_frag_page(frag), skb_frag_size(frag)); __skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++, addr, 0, skb_frag_size(frag)); } out: xsk_buff_free(xdp); return skb; } static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp_buff, union i40e_rx_desc *rx_desc, unsigned int *rx_packets, unsigned int *rx_bytes, unsigned int xdp_res, bool *failure) { struct sk_buff *skb; *rx_packets = 1; *rx_bytes = xdp_get_buff_len(xdp_buff); if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX) return; if (xdp_res == I40E_XDP_EXIT) { *failure = true; return; } if (xdp_res == I40E_XDP_CONSUMED) { xsk_buff_free(xdp_buff); return; } if (xdp_res == I40E_XDP_PASS) { /* NB! We are not checking for errors using * i40e_test_staterr with * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that * SBP is *not* set in PRT_SBPVSI (default not set). */ skb = i40e_construct_skb_zc(rx_ring, xdp_buff); if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; *rx_packets = 0; *rx_bytes = 0; return; } if (eth_skb_pad(skb)) { *rx_packets = 0; *rx_bytes = 0; return; } i40e_process_skb_fields(rx_ring, rx_desc, skb); napi_gro_receive(&rx_ring->q_vector->napi, skb); return; } /* Should never get here, as all valid cases have been handled already. */ WARN_ON_ONCE(1); } static int i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first, struct xdp_buff *xdp, const unsigned int size) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first); if (!xdp_buff_has_frags(first)) { sinfo->nr_frags = 0; sinfo->xdp_frags_size = 0; xdp_buff_set_frags_flag(first); } if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { xsk_buff_free(first); return -ENOMEM; } __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, virt_to_page(xdp->data_hard_start), 0, size); sinfo->xdp_frags_size += size; xsk_buff_add_frag(xdp); return 0; } /** * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring * @rx_ring: Rx ring * @budget: NAPI budget * * Returns amount of work completed **/ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 next_to_process = rx_ring->next_to_process; u16 next_to_clean = rx_ring->next_to_clean; u16 count_mask = rx_ring->count - 1; unsigned int xdp_res, xdp_xmit = 0; struct xdp_buff *first = NULL; struct bpf_prog *xdp_prog; bool failure = false; u16 cleaned_count; if (next_to_process != next_to_clean) first = *i40e_rx_bi(rx_ring, next_to_clean); /* NB! 
xdp_prog will always be !NULL, due to the fact that * this path is enabled by setting an XDP program. */ xdp_prog = READ_ONCE(rx_ring->xdp_prog); while (likely(total_rx_packets < (unsigned int)budget)) { union i40e_rx_desc *rx_desc; unsigned int rx_packets; unsigned int rx_bytes; struct xdp_buff *bi; unsigned int size; u64 qword; rx_desc = I40E_RX_DESC(rx_ring, next_to_process); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); /* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc until we have * verified the descriptor has been written back. */ dma_rmb(); if (i40e_rx_is_programming_status(qword)) { i40e_clean_programming_status(rx_ring, rx_desc->raw.qword[0], qword); bi = *i40e_rx_bi(rx_ring, next_to_process); xsk_buff_free(bi); next_to_process = (next_to_process + 1) & count_mask; continue; } size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; if (!size) break; bi = *i40e_rx_bi(rx_ring, next_to_process); xsk_buff_set_size(bi, size); xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool); if (!first) first = bi; else if (i40e_add_xsk_frag(rx_ring, first, bi, size)) break; next_to_process = (next_to_process + 1) & count_mask; if (i40e_is_non_eop(rx_ring, rx_desc)) continue; xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog); i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets, &rx_bytes, xdp_res, &failure); first->flags = 0; next_to_clean = next_to_process; if (failure) break; total_rx_packets += rx_packets; total_rx_bytes += rx_bytes; xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR); first = NULL; } rx_ring->next_to_clean = next_to_clean; rx_ring->next_to_process = next_to_process; cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask; if (cleaned_count >= I40E_RX_BUFFER_WRITE) failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count); i40e_finalize_xdp_rx(rx_ring, xdp_xmit); i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { if (failure || next_to_clean == rx_ring->next_to_use) xsk_set_rx_need_wakeup(rx_ring->xsk_pool); else xsk_clear_rx_need_wakeup(rx_ring->xsk_pool); return (int)total_rx_packets; } return failure ? 
budget : (int)total_rx_packets; } static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc, unsigned int *total_bytes) { u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(desc); struct i40e_tx_desc *tx_desc; dma_addr_t dma; dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr); xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len); tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++); tx_desc->buffer_addr = cpu_to_le64(dma); tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc->len, 0); *total_bytes += desc->len; } static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc, unsigned int *total_bytes) { u16 ntu = xdp_ring->next_to_use; struct i40e_tx_desc *tx_desc; dma_addr_t dma; u32 i; loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) { u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]); dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr); xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len); tx_desc = I40E_TX_DESC(xdp_ring, ntu++); tx_desc->buffer_addr = cpu_to_le64(dma); tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc[i].len, 0); *total_bytes += desc[i].len; } xdp_ring->next_to_use = ntu; } static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts, unsigned int *total_bytes) { u32 batched, leftover, i; batched = nb_pkts & ~(PKTS_PER_BATCH - 1); leftover = nb_pkts & (PKTS_PER_BATCH - 1); for (i = 0; i < batched; i += PKTS_PER_BATCH) i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes); for (i = batched; i < batched + leftover; i++) i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes); } static void i40e_set_rs_bit(struct i40e_ring *xdp_ring) { u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1; struct i40e_tx_desc *tx_desc; tx_desc = I40E_TX_DESC(xdp_ring, ntu); tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT); } /** * i40e_xmit_zc - Performs zero-copy Tx AF_XDP * @xdp_ring: XDP Tx ring * @budget: NAPI budget * * Returns true if the work is finished. **/ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) { struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs; u32 nb_pkts, nb_processed = 0; unsigned int total_bytes = 0; nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget); if (!nb_pkts) return true; if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) { nb_processed = xdp_ring->count - xdp_ring->next_to_use; i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes); xdp_ring->next_to_use = 0; } i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed, &total_bytes); /* Request an interrupt for the last frame and bump tail ptr. */ i40e_set_rs_bit(xdp_ring); i40e_xdp_ring_update_tail(xdp_ring); i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes); return nb_pkts < budget; } /** * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry * @tx_ring: XDP Tx ring * @tx_bi: Tx buffer info to clean **/ static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring, struct i40e_tx_buffer *tx_bi) { xdp_return_frame(tx_bi->xdpf); tx_ring->xdp_tx_active--; dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_bi, dma), dma_unmap_len(tx_bi, len), DMA_TO_DEVICE); dma_unmap_len_set(tx_bi, len, 0); } /** * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries * @vsi: Current VSI * @tx_ring: XDP Tx ring * * Returns true if cleanup/transmission is done. 
**/ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring) { struct xsk_buff_pool *bp = tx_ring->xsk_pool; u32 i, completed_frames, xsk_frames = 0; u32 head_idx = i40e_get_head(tx_ring); struct i40e_tx_buffer *tx_bi; unsigned int ntc; if (head_idx < tx_ring->next_to_clean) head_idx += tx_ring->count; completed_frames = head_idx - tx_ring->next_to_clean; if (completed_frames == 0) goto out_xmit; if (likely(!tx_ring->xdp_tx_active)) { xsk_frames = completed_frames; goto skip; } ntc = tx_ring->next_to_clean; for (i = 0; i < completed_frames; i++) { tx_bi = &tx_ring->tx_bi[ntc]; if (tx_bi->xdpf) { i40e_clean_xdp_tx_buffer(tx_ring, tx_bi); tx_bi->xdpf = NULL; } else { xsk_frames++; } if (++ntc >= tx_ring->count) ntc = 0; } skip: tx_ring->next_to_clean += completed_frames; if (unlikely(tx_ring->next_to_clean >= tx_ring->count)) tx_ring->next_to_clean -= tx_ring->count; if (xsk_frames) xsk_tx_completed(bp, xsk_frames); i40e_arm_wb(tx_ring, vsi, completed_frames); out_xmit: if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) xsk_set_tx_need_wakeup(tx_ring->xsk_pool); return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring)); } /** * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup * @dev: the netdevice * @queue_id: queue id to wake up * @flags: ignored in our case since we have Rx and Tx in the same NAPI. * * Returns <0 for errors, 0 otherwise. **/ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_ring *ring; if (test_bit(__I40E_CONFIG_BUSY, pf->state)) return -EAGAIN; if (test_bit(__I40E_VSI_DOWN, vsi->state)) return -ENETDOWN; if (!i40e_enabled_xdp_vsi(vsi)) return -EINVAL; if (queue_id >= vsi->num_queue_pairs) return -EINVAL; if (!vsi->xdp_rings[queue_id]->xsk_pool) return -EINVAL; ring = vsi->xdp_rings[queue_id]; /* The idea here is that if NAPI is running, mark a miss, so * it will run again. If not, trigger an interrupt and * schedule the NAPI from interrupt context. If NAPI would be * scheduled here, the interrupt affinity would not be * honored. */ if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) i40e_force_wb(vsi, ring->q_vector); return 0; } void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring) { u16 count_mask = rx_ring->count - 1; u16 ntc = rx_ring->next_to_clean; u16 ntu = rx_ring->next_to_use; for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) { struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc); xsk_buff_free(rx_bi); } } /** * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown * @tx_ring: XDP Tx ring **/ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring) { u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; struct xsk_buff_pool *bp = tx_ring->xsk_pool; struct i40e_tx_buffer *tx_bi; u32 xsk_frames = 0; while (ntc != ntu) { tx_bi = &tx_ring->tx_bi[ntc]; if (tx_bi->xdpf) i40e_clean_xdp_tx_buffer(tx_ring, tx_bi); else xsk_frames++; tx_bi->xdpf = NULL; ntc++; if (ntc >= tx_ring->count) ntc = 0; } if (xsk_frames) xsk_tx_completed(bp, xsk_frames); } /** * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP * buffer pool attached * @vsi: vsi * * Returns true if any of the Rx rings has an AF_XDP buffer pool attached **/ bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi) { struct net_device *netdev = vsi->netdev; int i; for (i = 0; i < vsi->num_queue_pairs; i++) { if (xsk_get_pool_from_qid(netdev, i)) return true; } return false; }
linux-master
drivers/net/ethernet/intel/i40e/i40e_xsk.c
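The zero-copy paths in the i40e_xsk.c record above lean on two power-of-two index tricks that are easy to miss in the flattened source: ring positions advance with a count-minus-one mask instead of a wraparound branch (see i40e_clean_rx_irq_zc and i40e_xsk_clean_rx_ring), and i40e_fill_tx_hw_ring splits the descriptor count into full PKTS_PER_BATCH batches plus a remainder using the same mask. The standalone user-space sketch below reproduces both calculations; RING_SIZE, BATCH, and ring_next are illustrative stand-ins rather than driver symbols, and the masking behaves like a modulo only under the same power-of-two assumption the driver code relies on.

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 512u  /* stands in for rx_ring->count; assumed power of two */
#define BATCH     4u    /* stands in for PKTS_PER_BATCH */

/* Advance a ring index with the count-minus-one mask, as in i40e_clean_rx_irq_zc. */
static unsigned int ring_next(unsigned int idx)
{
	return (idx + 1u) & (RING_SIZE - 1u);
}

int main(void)
{
	unsigned int next_to_clean = 510u, next_to_use = 5u, nb_pkts = 11u;
	unsigned int batched, leftover, i;

	/* Wraparound: 510, 511, 0, 1 -- no compare-and-reset branch needed. */
	for (i = 0u; i < 4u; i++) {
		printf("ntc=%u\n", next_to_clean);
		next_to_clean = ring_next(next_to_clean);
	}

	/* Free-slot count, same expression shape as the zero-copy Rx path. */
	printf("cleaned_count=%u\n",
	       (next_to_clean - next_to_use - 1u) & (RING_SIZE - 1u));

	/* Batch split used by i40e_fill_tx_hw_ring: full batches plus remainder. */
	batched  = nb_pkts & ~(BATCH - 1u);
	leftover = nb_pkts & (BATCH - 1u);
	assert(batched + leftover == nb_pkts);
	printf("batched=%u leftover=%u\n", batched, leftover);

	return 0;
}

Compiled as an ordinary C program this prints ntc=510, 511, 0, 1, then cleaned_count=508 and batched=8 leftover=3, which is the same arithmetic the Rx clean and Tx fill paths perform on each NAPI poll.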
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e.h" /*********************notification routines***********************/ /** * i40e_vc_vf_broadcast * @pf: pointer to the PF structure * @v_opcode: operation code * @v_retval: return value * @msg: pointer to the msg buffer * @msglen: msg length * * send a message to all VFs on a given PF **/ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, enum virtchnl_ops v_opcode, int v_retval, u8 *msg, u16 msglen) { struct i40e_hw *hw = &pf->hw; struct i40e_vf *vf = pf->vf; int i; for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; /* Not all vfs are enabled so skip the ones that are not */ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) && !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) continue; /* Ignore return value on purpose - a given VF may fail, but * we need to keep going and send to all of them */ i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, msg, msglen, NULL); } } /** * i40e_vc_link_speed2mbps * converts i40e_aq_link_speed to integer value of Mbps * @link_speed: the speed to convert * * return the speed as direct value of Mbps. **/ static u32 i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed) { switch (link_speed) { case I40E_LINK_SPEED_100MB: return SPEED_100; case I40E_LINK_SPEED_1GB: return SPEED_1000; case I40E_LINK_SPEED_2_5GB: return SPEED_2500; case I40E_LINK_SPEED_5GB: return SPEED_5000; case I40E_LINK_SPEED_10GB: return SPEED_10000; case I40E_LINK_SPEED_20GB: return SPEED_20000; case I40E_LINK_SPEED_25GB: return SPEED_25000; case I40E_LINK_SPEED_40GB: return SPEED_40000; case I40E_LINK_SPEED_UNKNOWN: return SPEED_UNKNOWN; } return SPEED_UNKNOWN; } /** * i40e_set_vf_link_state * @vf: pointer to the VF structure * @pfe: pointer to PF event structure * @ls: pointer to link status structure * * set a link state on a single vf **/ static void i40e_set_vf_link_state(struct i40e_vf *vf, struct virtchnl_pf_event *pfe, struct i40e_link_status *ls) { u8 link_status = ls->link_info & I40E_AQ_LINK_UP; if (vf->link_forced) link_status = vf->link_up; if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) { pfe->event_data.link_event_adv.link_speed = link_status ? i40e_vc_link_speed2mbps(ls->link_speed) : 0; pfe->event_data.link_event_adv.link_status = link_status; } else { pfe->event_data.link_event.link_speed = link_status ? 
i40e_virtchnl_link_speed(ls->link_speed) : 0; pfe->event_data.link_event.link_status = link_status; } } /** * i40e_vc_notify_vf_link_state * @vf: pointer to the VF structure * * send a link status message to a single VF **/ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) { struct virtchnl_pf_event pfe; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; struct i40e_link_status *ls = &pf->hw.phy.link_info; int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; i40e_set_vf_link_state(vf, &pfe, ls); i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(pfe), NULL); } /** * i40e_vc_notify_link_state * @pf: pointer to the PF structure * * send a link status message to all VFs on a given PF **/ void i40e_vc_notify_link_state(struct i40e_pf *pf) { int i; for (i = 0; i < pf->num_alloc_vfs; i++) i40e_vc_notify_vf_link_state(&pf->vf[i]); } /** * i40e_vc_notify_reset * @pf: pointer to the PF structure * * indicate a pending reset to all VFs on a given PF **/ void i40e_vc_notify_reset(struct i40e_pf *pf) { struct virtchnl_pf_event pfe; pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); } /** * i40e_vc_notify_vf_reset * @vf: pointer to the VF structure * * indicate a pending reset to the given VF **/ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) { struct virtchnl_pf_event pfe; int abs_vf_id; /* validate the request */ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) return; /* verify if the VF is in either init or active before proceeding */ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) && !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) return; abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id; pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(struct virtchnl_pf_event), NULL); } /***********************misc routines*****************************/ /** * i40e_vc_reset_vf * @vf: pointer to the VF info * @notify_vf: notify vf about reset or not * Reset VF handler. **/ static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf) { struct i40e_pf *pf = vf->pf; int i; if (notify_vf) i40e_vc_notify_vf_reset(vf); /* We want to ensure that an actual reset occurs initiated after this * function was called. However, we do not want to wait forever, so * we'll give a reasonable time and print a message if we failed to * ensure a reset. */ for (i = 0; i < 20; i++) { /* If PF is in VFs releasing state reset VF is impossible, * so leave it. 
*/ if (test_bit(__I40E_VFS_RELEASING, pf->state)) return; if (i40e_reset_vf(vf, false)) return; usleep_range(10000, 20000); } if (notify_vf) dev_warn(&vf->pf->pdev->dev, "Failed to initiate reset for VF %d after 200 milliseconds\n", vf->vf_id); else dev_dbg(&vf->pf->pdev->dev, "Failed to initiate reset for VF %d after 200 milliseconds\n", vf->vf_id); } /** * i40e_vc_isvalid_vsi_id * @vf: pointer to the VF info * @vsi_id: VF relative VSI id * * check for the valid VSI id **/ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); return (vsi && (vsi->vf_id == vf->vf_id)); } /** * i40e_vc_isvalid_queue_id * @vf: pointer to the VF info * @vsi_id: vsi id * @qid: vsi relative queue id * * check for the valid queue id **/ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, u16 qid) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); return (vsi && (qid < vsi->alloc_queue_pairs)); } /** * i40e_vc_isvalid_vector_id * @vf: pointer to the VF info * @vector_id: VF relative vector id * * check for the valid vector id **/ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id) { struct i40e_pf *pf = vf->pf; return vector_id < pf->hw.func_caps.num_msix_vectors_vf; } /***********************vf resource mgmt routines*****************/ /** * i40e_vc_get_pf_queue_id * @vf: pointer to the VF info * @vsi_id: id of VSI as provided by the FW * @vsi_queue_id: vsi relative queue id * * return PF relative queue id **/ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id, u8 vsi_queue_id) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); u16 pf_queue_id = I40E_QUEUE_END_OF_LIST; if (!vsi) return pf_queue_id; if (le16_to_cpu(vsi->info.mapping_flags) & I40E_AQ_VSI_QUE_MAP_NONCONTIG) pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]); else pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) + vsi_queue_id; return pf_queue_id; } /** * i40e_get_real_pf_qid * @vf: pointer to the VF info * @vsi_id: vsi id * @queue_id: queue number * * wrapper function to get pf_queue_id handling ADq code as well **/ static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id) { int i; if (vf->adq_enabled) { /* Although VF considers all the queues(can be 1 to 16) as its * own but they may actually belong to different VSIs(up to 4). * We need to find which queues belongs to which VSI. */ for (i = 0; i < vf->num_tc; i++) { if (queue_id < vf->ch[i].num_qps) { vsi_id = vf->ch[i].vsi_id; break; } /* find right queue id which is relative to a * given VSI. 
*/ queue_id -= vf->ch[i].num_qps; } } return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id); } /** * i40e_config_irq_link_list * @vf: pointer to the VF info * @vsi_id: id of VSI as given by the FW * @vecmap: irq map info * * configure irq link list from the map **/ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, struct virtchnl_vector_map *vecmap) { unsigned long linklistmap = 0, tempmap; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; u16 vsi_queue_id, pf_queue_id; enum i40e_queue_type qtype; u16 next_q, vector_id, size; u32 reg, reg_idx; u16 itr_idx = 0; vector_id = vecmap->vector_id; /* setup the head */ if (0 == vector_id) reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); else reg_idx = I40E_VPINT_LNKLSTN( ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) + (vector_id - 1)); if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { /* Special case - No queues mapped on this vector */ wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK); goto irq_list_done; } tempmap = vecmap->rxq_map; for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id)); } tempmap = vecmap->txq_map; for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id + 1)); } size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES; next_q = find_first_bit(&linklistmap, size); if (unlikely(next_q == size)) goto irq_list_done; vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id); reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); wr32(hw, reg_idx, reg); while (next_q < size) { switch (qtype) { case I40E_QUEUE_TYPE_RX: reg_idx = I40E_QINT_RQCTL(pf_queue_id); itr_idx = vecmap->rxitr_idx; break; case I40E_QUEUE_TYPE_TX: reg_idx = I40E_QINT_TQCTL(pf_queue_id); itr_idx = vecmap->txitr_idx; break; default: break; } next_q = find_next_bit(&linklistmap, size, next_q + 1); if (next_q < size) { vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id); } else { pf_queue_id = I40E_QUEUE_END_OF_LIST; qtype = 0; } /* format for the RQCTL & TQCTL regs is same */ reg = (vector_id) | (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); wr32(hw, reg_idx, reg); } /* if the vf is running in polling mode and using interrupt zero, * need to disable auto-mask on enabling zero interrupt for VFs. */ if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) && (vector_id == 0)) { reg = rd32(hw, I40E_GLINT_CTL); if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) { reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK; wr32(hw, I40E_GLINT_CTL, reg); } } irq_list_done: i40e_flush(hw); } /** * i40e_release_rdma_qvlist * @vf: pointer to the VF. 
* **/ static void i40e_release_rdma_qvlist(struct i40e_vf *vf) { struct i40e_pf *pf = vf->pf; struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info; u32 msix_vf; u32 i; if (!vf->qvlist_info) return; msix_vf = pf->hw.func_caps.num_msix_vectors_vf; for (i = 0; i < qvlist_info->num_vectors; i++) { struct virtchnl_rdma_qv_info *qv_info; u32 next_q_index, next_q_type; struct i40e_hw *hw = &pf->hw; u32 v_idx, reg_idx, reg; qv_info = &qvlist_info->qv_info[i]; if (!qv_info) continue; v_idx = qv_info->v_idx; if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { /* Figure out the queue after CEQ and make that the * first queue. */ reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx)); next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK) >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT; next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK) >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT; reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); reg = (next_q_index & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) | (next_q_type << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg); } } kfree(vf->qvlist_info); vf->qvlist_info = NULL; } /** * i40e_config_rdma_qvlist * @vf: pointer to the VF info * @qvlist_info: queue and vector list * * Return 0 on success or < 0 on error **/ static int i40e_config_rdma_qvlist(struct i40e_vf *vf, struct virtchnl_rdma_qvlist_info *qvlist_info) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; struct virtchnl_rdma_qv_info *qv_info; u32 v_idx, i, reg_idx, reg; u32 next_q_idx, next_q_type; size_t size; u32 msix_vf; int ret = 0; msix_vf = pf->hw.func_caps.num_msix_vectors_vf; if (qvlist_info->num_vectors > msix_vf) { dev_warn(&pf->pdev->dev, "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n", qvlist_info->num_vectors, msix_vf); ret = -EINVAL; goto err_out; } kfree(vf->qvlist_info); size = virtchnl_struct_size(vf->qvlist_info, qv_info, qvlist_info->num_vectors); vf->qvlist_info = kzalloc(size, GFP_KERNEL); if (!vf->qvlist_info) { ret = -ENOMEM; goto err_out; } vf->qvlist_info->num_vectors = qvlist_info->num_vectors; msix_vf = pf->hw.func_caps.num_msix_vectors_vf; for (i = 0; i < qvlist_info->num_vectors; i++) { qv_info = &qvlist_info->qv_info[i]; if (!qv_info) continue; /* Validate vector id belongs to this vf */ if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) { ret = -EINVAL; goto err_free; } v_idx = qv_info->v_idx; vf->qvlist_info->qv_info[i] = *qv_info; reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); /* We might be sharing the interrupt, so get the first queue * index and type, push it down the list by adding the new * queue on top. Also link it with the new queue in CEQCTL. 
*/ reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx)); next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >> I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT); next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >> I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) { reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx; reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK | (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) | (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) | (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) | (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)); wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg); reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); reg = (qv_info->ceq_idx & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) | (I40E_QUEUE_TYPE_PE_CEQ << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT); wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg); } if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) { reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK | (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) | (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)); wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg); } } return 0; err_free: kfree(vf->qvlist_info); vf->qvlist_info = NULL; err_out: return ret; } /** * i40e_config_vsi_tx_queue * @vf: pointer to the VF info * @vsi_id: id of VSI as provided by the FW * @vsi_queue_id: vsi relative queue index * @info: config. info * * configure tx queue **/ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id, u16 vsi_queue_id, struct virtchnl_txq_info *info) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; struct i40e_hmc_obj_txq tx_ctx; struct i40e_vsi *vsi; u16 pf_queue_id; u32 qtx_ctl; int ret = 0; if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { ret = -ENOENT; goto error_context; } pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); vsi = i40e_find_vsi_from_id(pf, vsi_id); if (!vsi) { ret = -ENOENT; goto error_context; } /* clear the context structure first */ memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq)); /* only set the required fields */ tx_ctx.base = info->dma_ring_addr / 128; tx_ctx.qlen = info->ring_len; tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]); tx_ctx.rdylist_act = 0; tx_ctx.head_wb_ena = info->headwb_enabled; tx_ctx.head_wb_addr = info->dma_headwb_addr; /* clear the context in the HMC */ ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); if (ret) { dev_err(&pf->pdev->dev, "Failed to clear VF LAN Tx queue context %d, error: %d\n", pf_queue_id, ret); ret = -ENOENT; goto error_context; } /* set the context in the HMC */ ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx); if (ret) { dev_err(&pf->pdev->dev, "Failed to set VF LAN Tx queue context %d error: %d\n", pf_queue_id, ret); ret = -ENOENT; goto error_context; } /* associate this queue with the PCI VF function */ qtx_ctl = I40E_QTX_CTL_VF_QUEUE; qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & I40E_QTX_CTL_PF_INDX_MASK); qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & I40E_QTX_CTL_VFVM_INDX_MASK); wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl); i40e_flush(hw); error_context: return ret; } /** * i40e_config_vsi_rx_queue * @vf: pointer to the VF info * @vsi_id: id of VSI as provided by the FW * @vsi_queue_id: vsi relative queue index * @info: config. 
info * * configure rx queue **/ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, u16 vsi_queue_id, struct virtchnl_rxq_info *info) { u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; struct i40e_hw *hw = &pf->hw; struct i40e_hmc_obj_rxq rx_ctx; int ret = 0; /* clear the context structure first */ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); /* only set the required fields */ rx_ctx.base = info->dma_ring_addr / 128; rx_ctx.qlen = info->ring_len; if (info->splithdr_enabled) { rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | I40E_RX_SPLIT_IP | I40E_RX_SPLIT_TCP_UDP | I40E_RX_SPLIT_SCTP; /* header length validation */ if (info->hdr_size > ((2 * 1024) - 64)) { ret = -EINVAL; goto error_param; } rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; /* set split mode 10b */ rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT; } /* databuffer length validation */ if (info->databuffer_size > ((16 * 1024) - 128)) { ret = -EINVAL; goto error_param; } rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; /* max pkt. length validation */ if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) { ret = -EINVAL; goto error_param; } rx_ctx.rxmax = info->max_pkt_size; /* if port VLAN is configured increase the max packet size */ if (vsi->info.pvid) rx_ctx.rxmax += VLAN_HLEN; /* enable 32bytes desc always */ rx_ctx.dsize = 1; /* default values */ rx_ctx.lrxqthresh = 1; rx_ctx.crcstrip = 1; rx_ctx.prefena = 1; rx_ctx.l2tsel = 1; /* clear the context in the HMC */ ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); if (ret) { dev_err(&pf->pdev->dev, "Failed to clear VF LAN Rx queue context %d, error: %d\n", pf_queue_id, ret); ret = -ENOENT; goto error_param; } /* set the context in the HMC */ ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx); if (ret) { dev_err(&pf->pdev->dev, "Failed to set VF LAN Rx queue context %d error: %d\n", pf_queue_id, ret); ret = -ENOENT; goto error_param; } error_param: return ret; } /** * i40e_alloc_vsi_res * @vf: pointer to the VF info * @idx: VSI index, applies only for ADq mode, zero otherwise * * alloc VF vsi context & resources **/ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx) { struct i40e_mac_filter *f = NULL; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi; u64 max_tx_rate = 0; int ret = 0; vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid, vf->vf_id); if (!vsi) { dev_err(&pf->pdev->dev, "add vsi failed for VF %d, aq_err %d\n", vf->vf_id, pf->hw.aq.asq_last_status); ret = -ENOENT; goto error_alloc_vsi_res; } if (!idx) { u64 hena = i40e_pf_get_default_rss_hena(pf); u8 broadcast[ETH_ALEN]; vf->lan_vsi_idx = vsi->idx; vf->lan_vsi_id = vsi->id; /* If the port VLAN has been configured and then the * VF driver was removed then the VSI port VLAN * configuration was destroyed. Check if there is * a port VLAN and restore the VSI configuration if * needed. 
*/ if (vf->port_vlan_id) i40e_vsi_add_pvid(vsi, vf->port_vlan_id); spin_lock_bh(&vsi->mac_filter_hash_lock); if (is_valid_ether_addr(vf->default_lan_addr.addr)) { f = i40e_add_mac_filter(vsi, vf->default_lan_addr.addr); if (!f) dev_info(&pf->pdev->dev, "Could not add MAC filter %pM for VF %d\n", vf->default_lan_addr.addr, vf->vf_id); } eth_broadcast_addr(broadcast); f = i40e_add_mac_filter(vsi, broadcast); if (!f) dev_info(&pf->pdev->dev, "Could not allocate VF broadcast filter\n"); spin_unlock_bh(&vsi->mac_filter_hash_lock); wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena); wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32)); /* program mac filter only for VF VSI */ ret = i40e_sync_vsi_filters(vsi); if (ret) dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); } /* storing VSI index and id for ADq and don't apply the mac filter */ if (vf->adq_enabled) { vf->ch[idx].vsi_idx = vsi->idx; vf->ch[idx].vsi_id = vsi->id; } /* Set VF bandwidth if specified */ if (vf->tx_rate) { max_tx_rate = vf->tx_rate; } else if (vf->ch[idx].max_tx_rate) { max_tx_rate = vf->ch[idx].max_tx_rate; } if (max_tx_rate) { max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR); ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, max_tx_rate, 0, NULL); if (ret) dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n", vf->vf_id, ret); } error_alloc_vsi_res: return ret; } /** * i40e_map_pf_queues_to_vsi * @vf: pointer to the VF info * * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI. **/ static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; u32 reg, num_tc = 1; /* VF has at least one traffic class */ u16 vsi_id, qps; int i, j; if (vf->adq_enabled) num_tc = vf->num_tc; for (i = 0; i < num_tc; i++) { if (vf->adq_enabled) { qps = vf->ch[i].num_qps; vsi_id = vf->ch[i].vsi_id; } else { qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; vsi_id = vf->lan_vsi_id; } for (j = 0; j < 7; j++) { if (j * 2 >= qps) { /* end of list */ reg = 0x07FF07FF; } else { u16 qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j * 2); reg = qid; qid = i40e_vc_get_pf_queue_id(vf, vsi_id, (j * 2) + 1); reg |= qid << 16; } i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vsi_id), reg); } } } /** * i40e_map_pf_to_vf_queues * @vf: pointer to the VF info * * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This * function takes care of the second part VPLAN_QTABLE & completes VF mappings. **/ static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; u32 reg, total_qps = 0; u32 qps, num_tc = 1; /* VF has at least one traffic class */ u16 vsi_id, qid; int i, j; if (vf->adq_enabled) num_tc = vf->num_tc; for (i = 0; i < num_tc; i++) { if (vf->adq_enabled) { qps = vf->ch[i].num_qps; vsi_id = vf->ch[i].vsi_id; } else { qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; vsi_id = vf->lan_vsi_id; } for (j = 0; j < qps; j++) { qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j); reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id), reg); total_qps++; } } } /** * i40e_enable_vf_mappings * @vf: pointer to the VF info * * enable VF mappings **/ static void i40e_enable_vf_mappings(struct i40e_vf *vf) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; u32 reg; /* Tell the hardware we're using noncontiguous mapping. 
HW requires * that VF queues be mapped using this method, even when they are * contiguous in real life */ i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id), I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); /* enable VF vplan_qtable mappings */ reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK; wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); i40e_map_pf_to_vf_queues(vf); i40e_map_pf_queues_to_vsi(vf); i40e_flush(hw); } /** * i40e_disable_vf_mappings * @vf: pointer to the VF info * * disable VF mappings **/ static void i40e_disable_vf_mappings(struct i40e_vf *vf) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; int i; /* disable qp mappings */ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0); for (i = 0; i < I40E_MAX_VSI_QP; i++) wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id), I40E_QUEUE_END_OF_LIST); i40e_flush(hw); } /** * i40e_free_vf_res * @vf: pointer to the VF info * * free VF resources **/ static void i40e_free_vf_res(struct i40e_vf *vf) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; u32 reg_idx, reg; int i, j, msix_vf; /* Start by disabling VF's configuration API to prevent the OS from * accessing the VF's VSI after it's freed / invalidated. */ clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); /* It's possible the VF had requeuested more queues than the default so * do the accounting here when we're about to free them. */ if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) { pf->queues_left += vf->num_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF; } /* free vsi & disconnect it from the parent uplink */ if (vf->lan_vsi_idx) { i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]); vf->lan_vsi_idx = 0; vf->lan_vsi_id = 0; } /* do the accounting and remove additional ADq VSI's */ if (vf->adq_enabled && vf->ch[0].vsi_idx) { for (j = 0; j < vf->num_tc; j++) { /* At this point VSI0 is already released so don't * release it again and only clear their values in * structure variables */ if (j) i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]); vf->ch[j].vsi_idx = 0; vf->ch[j].vsi_id = 0; } } msix_vf = pf->hw.func_caps.num_msix_vectors_vf; /* disable interrupts so the VF starts in a known state */ for (i = 0; i < msix_vf; i++) { /* format is same for both registers */ if (0 == i) reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id); else reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) * (vf->vf_id)) + (i - 1)); wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); i40e_flush(hw); } /* clear the irq settings */ for (i = 0; i < msix_vf; i++) { /* format is same for both registers */ if (0 == i) reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); else reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) * (vf->vf_id)) + (i - 1)); reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); wr32(hw, reg_idx, reg); i40e_flush(hw); } /* reset some of the state variables keeping track of the resources */ vf->num_queue_pairs = 0; clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); } /** * i40e_alloc_vf_res * @vf: pointer to the VF info * * allocate VF resources **/ static int i40e_alloc_vf_res(struct i40e_vf *vf) { struct i40e_pf *pf = vf->pf; int total_queue_pairs = 0; int ret, idx; if (vf->num_req_queues && vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF) pf->num_vf_qps = vf->num_req_queues; else pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; /* allocate hw vsi context & associated resources */ ret = i40e_alloc_vsi_res(vf, 0); if (ret) goto error_alloc; total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; /* allocate additional VSIs based on tc information 
for ADq */ if (vf->adq_enabled) { if (pf->queues_left >= (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) { /* TC 0 always belongs to VF VSI */ for (idx = 1; idx < vf->num_tc; idx++) { ret = i40e_alloc_vsi_res(vf, idx); if (ret) goto error_alloc; } /* send correct number of queues */ total_queue_pairs = I40E_MAX_VF_QUEUES; } else { dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n", vf->vf_id); vf->adq_enabled = false; } } /* We account for each VF to get a default number of queue pairs. If * the VF has now requested more, we need to account for that to make * certain we never request more queues than we actually have left in * HW. */ if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) pf->queues_left -= total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF; if (vf->trusted) set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); else clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); /* store the total qps number for the runtime * VF req validation */ vf->num_queue_pairs = total_queue_pairs; /* VF is now completely initialized */ set_bit(I40E_VF_STATE_INIT, &vf->vf_states); error_alloc: if (ret) i40e_free_vf_res(vf); return ret; } #define VF_DEVICE_STATUS 0xAA #define VF_TRANS_PENDING_MASK 0x20 /** * i40e_quiesce_vf_pci * @vf: pointer to the VF structure * * Wait for VF PCI transactions to be cleared after reset. Returns -EIO * if the transactions never clear. **/ static int i40e_quiesce_vf_pci(struct i40e_vf *vf) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; int vf_abs_id, i; u32 reg; vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; wr32(hw, I40E_PF_PCI_CIAA, VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); for (i = 0; i < 100; i++) { reg = rd32(hw, I40E_PF_PCI_CIAD); if ((reg & VF_TRANS_PENDING_MASK) == 0) return 0; udelay(1); } return -EIO; } /** * __i40e_getnum_vf_vsi_vlan_filters * @vsi: pointer to the vsi * * called to get the number of VLANs offloaded on this VF **/ static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) { struct i40e_mac_filter *f; u16 num_vlans = 0, bkt; hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) num_vlans++; } return num_vlans; } /** * i40e_getnum_vf_vsi_vlan_filters * @vsi: pointer to the vsi * * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held **/ static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) { int num_vlans; spin_lock_bh(&vsi->mac_filter_hash_lock); num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi); spin_unlock_bh(&vsi->mac_filter_hash_lock); return num_vlans; } /** * i40e_get_vlan_list_sync * @vsi: pointer to the VSI * @num_vlans: number of VLANs in mac_filter_hash, returned to caller * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller. * This array is allocated here, but has to be freed in caller. * * Called to get number of VLANs and VLAN list present in mac_filter_hash. 
**/ static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans, s16 **vlan_list) { struct i40e_mac_filter *f; int i = 0; int bkt; spin_lock_bh(&vsi->mac_filter_hash_lock); *num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi); *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC); if (!(*vlan_list)) goto err; hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) continue; (*vlan_list)[i++] = f->vlan; } err: spin_unlock_bh(&vsi->mac_filter_hash_lock); } /** * i40e_set_vsi_promisc * @vf: pointer to the VF struct * @seid: VSI number * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable * for a given VLAN * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable * for a given VLAN * @vl: List of VLANs - apply filter for given VLANs * @num_vlans: Number of elements in @vl **/ static int i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable, bool unicast_enable, s16 *vl, u16 num_vlans) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; int aq_ret, aq_tmp = 0; int i; /* No VLAN to set promisc on, set on VSI */ if (!num_vlans || !vl) { aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid, multi_enable, NULL); if (aq_ret) { int aq_err = pf->hw.aq.asq_last_status; dev_err(&pf->pdev->dev, "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n", vf->vf_id, ERR_PTR(aq_ret), i40e_aq_str(&pf->hw, aq_err)); return aq_ret; } aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid, unicast_enable, NULL, true); if (aq_ret) { int aq_err = pf->hw.aq.asq_last_status; dev_err(&pf->pdev->dev, "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n", vf->vf_id, ERR_PTR(aq_ret), i40e_aq_str(&pf->hw, aq_err)); } return aq_ret; } for (i = 0; i < num_vlans; i++) { aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid, multi_enable, vl[i], NULL); if (aq_ret) { int aq_err = pf->hw.aq.asq_last_status; dev_err(&pf->pdev->dev, "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n", vf->vf_id, ERR_PTR(aq_ret), i40e_aq_str(&pf->hw, aq_err)); if (!aq_tmp) aq_tmp = aq_ret; } aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid, unicast_enable, vl[i], NULL); if (aq_ret) { int aq_err = pf->hw.aq.asq_last_status; dev_err(&pf->pdev->dev, "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n", vf->vf_id, ERR_PTR(aq_ret), i40e_aq_str(&pf->hw, aq_err)); if (!aq_tmp) aq_tmp = aq_ret; } } if (aq_tmp) aq_ret = aq_tmp; return aq_ret; } /** * i40e_config_vf_promiscuous_mode * @vf: pointer to the VF info * @vsi_id: VSI id * @allmulti: set MAC L2 layer multicast promiscuous enable/disable * @alluni: set MAC L2 layer unicast promiscuous enable/disable * * Called from the VF to configure the promiscuous mode of * VF vsis and from the VF reset path to reset promiscuous mode. 
**/ static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, u16 vsi_id, bool allmulti, bool alluni) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi; int aq_ret = 0; u16 num_vlans; s16 *vl; vsi = i40e_find_vsi_from_id(pf, vsi_id); if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi) return -EINVAL; if (vf->port_vlan_id) { aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni, &vf->port_vlan_id, 1); return aq_ret; } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { i40e_get_vlan_list_sync(vsi, &num_vlans, &vl); if (!vl) return -ENOMEM; aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni, vl, num_vlans); kfree(vl); return aq_ret; } /* no VLANs to set on, set on VSI */ aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni, NULL, 0); return aq_ret; } /** * i40e_sync_vfr_reset * @hw: pointer to hw struct * @vf_id: VF identifier * * Before trigger hardware reset, we need to know if no other process has * reserved the hardware for any reset operations. This check is done by * examining the status of the RSTAT1 register used to signal the reset. **/ static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id) { u32 reg; int i; for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) { reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) & I40E_VFINT_ICR0_ADMINQ_MASK; if (reg) return 0; usleep_range(100, 200); } return -EAGAIN; } /** * i40e_trigger_vf_reset * @vf: pointer to the VF structure * @flr: VFLR was issued or not * * Trigger hardware to start a reset for a particular VF. Expects the caller * to wait the proper amount of time to allow hardware to reset the VF before * it cleans up and restores VF functionality. **/ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; u32 reg, reg_idx, bit_idx; bool vf_active; u32 radq; /* warn the VF */ vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); /* Disable VF's configuration API during reset. The flag is re-enabled * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI. * It's normally disabled in i40e_free_vf_res(), but it's safer * to do it earlier to give some time to finish to any VF config * functions that may still be running at this point. */ clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); /* In the case of a VFLR, the HW has already reset the VF and we * just need to clean up, so don't hit the VFRTRIG register. */ if (!flr) { /* Sync VFR reset before trigger next one */ radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) & I40E_VFINT_ICR0_ADMINQ_MASK; if (vf_active && !radq) /* waiting for finish reset by virtual driver */ if (i40e_sync_vfr_reset(hw, vf->vf_id)) dev_info(&pf->pdev->dev, "Reset VF %d never finished\n", vf->vf_id); /* Reset VF using VPGEN_VFRTRIG reg. It is also setting * in progress state in rstat1 register. */ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); i40e_flush(hw); } /* clear the VFLR bit in GLGEN_VFLRSTAT */ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); i40e_flush(hw); if (i40e_quiesce_vf_pci(vf)) dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", vf->vf_id); } /** * i40e_cleanup_reset_vf * @vf: pointer to the VF structure * * Cleanup a VF after the hardware reset is finished. Expects the caller to * have verified whether the reset is finished properly, and ensure the * minimum amount of wait time has passed. 
**/ static void i40e_cleanup_reset_vf(struct i40e_vf *vf) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; u32 reg; /* disable promisc modes in case they were enabled */ i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false); /* free VF resources to begin resetting the VSI state */ i40e_free_vf_res(vf); /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg. * By doing this we allow HW to access VF memory at any point. If we * did it any sooner, HW could access memory while it was being freed * in i40e_free_vf_res(), causing an IOMMU fault. * * On the other hand, this needs to be done ASAP, because the VF driver * is waiting for this to happen and may report a timeout. It's * harmless, but it gets logged into Guest OS kernel log, so best avoid * it. */ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); /* reallocate VF resources to finish resetting the VSI state */ if (!i40e_alloc_vf_res(vf)) { int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; i40e_enable_vf_mappings(vf); set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); /* Do not notify the client during VF init */ if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, &vf->vf_states)) i40e_notify_client_of_vf_reset(pf, abs_vf_id); vf->num_vlan = 0; } /* Tell the VF driver the reset is done. This needs to be done only * after VF has been fully initialized, because the VF driver may * request resources immediately after setting this flag. */ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); } /** * i40e_reset_vf * @vf: pointer to the VF structure * @flr: VFLR was issued or not * * Returns true if the VF is in reset, resets successfully, or resets * are disabled and false otherwise. **/ bool i40e_reset_vf(struct i40e_vf *vf, bool flr) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; bool rsd = false; u32 reg; int i; if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) return true; /* Bail out if VFs are disabled. */ if (test_bit(__I40E_VF_DISABLE, pf->state)) return true; /* If VF is being reset already we don't need to continue. */ if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) return true; i40e_trigger_vf_reset(vf, flr); /* poll VPGEN_VFRSTAT reg to make sure * that reset is complete */ for (i = 0; i < 10; i++) { /* VF reset requires driver to first reset the VF and then * poll the status register to make sure that the reset * completed successfully. Due to internal HW FIFO flushes, * we must wait 10ms before the register will be valid. */ usleep_range(10000, 20000); reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { rsd = true; break; } } if (flr) usleep_range(10000, 20000); if (!rsd) dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", vf->vf_id); usleep_range(10000, 20000); /* On initial reset, we don't have any queues to disable */ if (vf->lan_vsi_idx != 0) i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]); i40e_cleanup_reset_vf(vf); i40e_flush(hw); usleep_range(20000, 40000); clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states); return true; } /** * i40e_reset_all_vfs * @pf: pointer to the PF structure * @flr: VFLR was issued or not * * Reset all allocated VFs in one go. First, tell the hardware to reset each * VF, then do all the waiting in one chunk, and finally finish restoring each * VF after the wait. 
This is useful during PF routines which need to reset * all VFs, as otherwise it must perform these resets in a serialized fashion. * * Returns true if any VFs were reset, and false otherwise. **/ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) { struct i40e_hw *hw = &pf->hw; struct i40e_vf *vf; int i, v; u32 reg; /* If we don't have any VFs, then there is nothing to reset */ if (!pf->num_alloc_vfs) return false; /* If VFs have been disabled, there is no need to reset */ if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) return false; /* Begin reset on all VFs at once */ for (v = 0; v < pf->num_alloc_vfs; v++) { vf = &pf->vf[v]; /* If VF is being reset no need to trigger reset again */ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) i40e_trigger_vf_reset(&pf->vf[v], flr); } /* HW requires some time to make sure it can flush the FIFO for a VF * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in * sequence to make sure that it has completed. We'll keep track of * the VFs using a simple iterator that increments once that VF has * finished resetting. */ for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { usleep_range(10000, 20000); /* Check each VF in sequence, beginning with the VF to fail * the previous check. */ while (v < pf->num_alloc_vfs) { vf = &pf->vf[v]; if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) { reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) break; } /* If the current VF has finished resetting, move on * to the next VF in sequence. */ v++; } } if (flr) usleep_range(10000, 20000); /* Display a warning if at least one VF didn't manage to reset in * time, but continue on with the operation. */ if (v < pf->num_alloc_vfs) dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", pf->vf[v].vf_id); usleep_range(10000, 20000); /* Begin disabling all the rings associated with VFs, but do not wait * between each VF. */ for (v = 0; v < pf->num_alloc_vfs; v++) { /* On initial reset, we don't have any queues to disable */ if (pf->vf[v].lan_vsi_idx == 0) continue; /* If VF is reset in another thread just continue */ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) continue; i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]); } /* Now that we've notified HW to disable all of the VF rings, wait * until they finish. */ for (v = 0; v < pf->num_alloc_vfs; v++) { /* On initial reset, we don't have any queues to disable */ if (pf->vf[v].lan_vsi_idx == 0) continue; /* If VF is reset in another thread just continue */ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) continue; i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]); } /* Hw may need up to 50ms to finish disabling the RX queues. We * minimize the wait by delaying only once for all VFs. 
*/ mdelay(50); /* Finish the reset on each VF */ for (v = 0; v < pf->num_alloc_vfs; v++) { /* If VF is reset in another thread just continue */ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) continue; i40e_cleanup_reset_vf(&pf->vf[v]); } i40e_flush(hw); usleep_range(20000, 40000); clear_bit(__I40E_VF_DISABLE, pf->state); return true; } /** * i40e_free_vfs * @pf: pointer to the PF structure * * free VF resources **/ void i40e_free_vfs(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 reg_idx, bit_idx; int i, tmp, vf_id; if (!pf->vf) return; set_bit(__I40E_VFS_RELEASING, pf->state); while (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) usleep_range(1000, 2000); i40e_notify_client_of_vf_enable(pf, 0); /* Disable IOV before freeing resources. This lets any VF drivers * running in the host get themselves cleaned up before we yank * the carpet out from underneath their feet. */ if (!pci_vfs_assigned(pf->pdev)) pci_disable_sriov(pf->pdev); else dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); /* Amortize wait time by stopping all VFs at the same time */ for (i = 0; i < pf->num_alloc_vfs; i++) { if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) continue; i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]); } for (i = 0; i < pf->num_alloc_vfs; i++) { if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) continue; i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]); } /* free up VF resources */ tmp = pf->num_alloc_vfs; pf->num_alloc_vfs = 0; for (i = 0; i < tmp; i++) { if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) i40e_free_vf_res(&pf->vf[i]); /* disable qp mappings */ i40e_disable_vf_mappings(&pf->vf[i]); } kfree(pf->vf); pf->vf = NULL; /* This check is for when the driver is unloaded while VFs are * assigned. Setting the number of VFs to 0 through sysfs is caught * before this function ever gets called. */ if (!pci_vfs_assigned(pf->pdev)) { /* Acknowledge VFLR for all VFS. Without this, VFs will fail to * work correctly when SR-IOV gets re-enabled. */ for (vf_id = 0; vf_id < tmp; vf_id++) { reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); } } clear_bit(__I40E_VF_DISABLE, pf->state); clear_bit(__I40E_VFS_RELEASING, pf->state); } #ifdef CONFIG_PCI_IOV /** * i40e_alloc_vfs * @pf: pointer to the PF structure * @num_alloc_vfs: number of VFs to allocate * * allocate VF resources **/ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) { struct i40e_vf *vfs; int i, ret = 0; /* Disable interrupt 0 so we don't try to handle the VFLR. 
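 * (Interrupt 0 is re-enabled via i40e_irq_dynamic_enable_icr0() at the end
 * of this function, once the VFs have been set up.)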
*/ i40e_irq_dynamic_disable_icr0(pf); /* Check to see if we're just allocating resources for extant VFs */ if (pci_num_vf(pf->pdev) != num_alloc_vfs) { ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); if (ret) { pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; pf->num_alloc_vfs = 0; goto err_iov; } } /* allocate memory */ vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL); if (!vfs) { ret = -ENOMEM; goto err_alloc; } pf->vf = vfs; /* apply default profile */ for (i = 0; i < num_alloc_vfs; i++) { vfs[i].pf = pf; vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB; vfs[i].vf_id = i; /* assign default capabilities */ set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); vfs[i].spoofchk = true; set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states); } pf->num_alloc_vfs = num_alloc_vfs; /* VF resources get allocated during reset */ i40e_reset_all_vfs(pf, false); i40e_notify_client_of_vf_enable(pf, num_alloc_vfs); err_alloc: if (ret) i40e_free_vfs(pf); err_iov: /* Re-enable interrupt 0. */ i40e_irq_dynamic_enable_icr0(pf); return ret; } #endif /** * i40e_pci_sriov_enable * @pdev: pointer to a pci_dev structure * @num_vfs: number of VFs to allocate * * Enable or change the number of VFs **/ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) { #ifdef CONFIG_PCI_IOV struct i40e_pf *pf = pci_get_drvdata(pdev); int pre_existing_vfs = pci_num_vf(pdev); int err = 0; if (test_bit(__I40E_TESTING, pf->state)) { dev_warn(&pdev->dev, "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n"); err = -EPERM; goto err_out; } if (pre_existing_vfs && pre_existing_vfs != num_vfs) i40e_free_vfs(pf); else if (pre_existing_vfs && pre_existing_vfs == num_vfs) goto out; if (num_vfs > pf->num_req_vfs) { dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n", num_vfs, pf->num_req_vfs); err = -EPERM; goto err_out; } dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); err = i40e_alloc_vfs(pf, num_vfs); if (err) { dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err); goto err_out; } out: return num_vfs; err_out: return err; #endif return 0; } /** * i40e_pci_sriov_configure * @pdev: pointer to a pci_dev structure * @num_vfs: number of VFs to allocate * * Enable or change the number of VFs. Called when the user updates the number * of VFs in sysfs. 
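 * (This is typically reached by writing a VF count to the PF's
 * sriov_numvfs sysfs attribute; a count of 0 requests that all VFs be
 * freed, which is handled below.)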
**/ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct i40e_pf *pf = pci_get_drvdata(pdev); int ret = 0; if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n"); return -EAGAIN; } if (num_vfs) { if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); } ret = i40e_pci_sriov_enable(pdev, num_vfs); goto sriov_configure_out; } if (!pci_vfs_assigned(pf->pdev)) { i40e_free_vfs(pf); pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); } else { dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); ret = -EINVAL; goto sriov_configure_out; } sriov_configure_out: clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } /***********************virtual channel routines******************/ /** * i40e_vc_send_msg_to_vf * @vf: pointer to the VF info * @v_opcode: virtual channel opcode * @v_retval: virtual channel return value * @msg: pointer to the msg buffer * @msglen: msg length * * send msg to VF **/ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen) { struct i40e_pf *pf; struct i40e_hw *hw; int abs_vf_id; int aq_ret; /* validate the request */ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) return -EINVAL; pf = vf->pf; hw = &pf->hw; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret) { dev_info(&pf->pdev->dev, "Unable to send the message to VF %d aq_err %d\n", vf->vf_id, pf->hw.aq.asq_last_status); return -EIO; } return 0; } /** * i40e_vc_send_resp_to_vf * @vf: pointer to the VF info * @opcode: operation code * @retval: return value * * send resp msg to VF **/ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, enum virtchnl_ops opcode, int retval) { return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0); } /** * i40e_sync_vf_state * @vf: pointer to the VF info * @state: VF state * * Called from a VF message to synchronize the service with a potential * VF reset state **/ static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state) { int i; /* When handling some messages, it needs VF state to be set. * It is possible that this flag is cleared during VF reset, * so there is a need to wait until the end of the reset to * handle the request message correctly. */ for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) { if (test_bit(state, &vf->vf_states)) return true; usleep_range(10000, 20000); } return test_bit(state, &vf->vf_states); } /** * i40e_vc_get_version_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * called from the VF to request the API version used by the PF **/ static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_version_info info = { VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR }; vf->vf_ver = *(struct virtchnl_version_info *)msg; /* VFs running the 1.0 API expect to get 1.0 back or they will cry. 
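 * (Hence the minor version is downgraded to
 * VIRTCHNL_VERSION_MINOR_NO_VF_CAPS just below for 1.0 VFs.)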
*/ if (VF_IS_V10(&vf->vf_ver)) info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, 0, (u8 *)&info, sizeof(struct virtchnl_version_info)); } /** * i40e_del_qch - delete all the additional VSIs created as a part of ADq * @vf: pointer to VF structure **/ static void i40e_del_qch(struct i40e_vf *vf) { struct i40e_pf *pf = vf->pf; int i; /* first element in the array belongs to primary VF VSI and we shouldn't * delete it. We should however delete the rest of the VSIs created */ for (i = 1; i < vf->num_tc; i++) { if (vf->ch[i].vsi_idx) { i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]); vf->ch[i].vsi_idx = 0; vf->ch[i].vsi_id = 0; } } } /** * i40e_vc_get_max_frame_size * @vf: pointer to the VF * * Max frame size is determined based on the current port's max frame size and * whether a port VLAN is configured on this VF. The VF is not aware whether * it's in a port VLAN so the PF needs to account for this in max frame size * checks and sending the max frame size to the VF. **/ static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf) { u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size; if (vf->port_vlan_id) max_frame_size -= VLAN_HLEN; return max_frame_size; } /** * i40e_vc_get_vf_resources_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * called from the VF to request its resources **/ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_vf_resource *vfres = NULL; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi; int num_vsis = 1; int aq_ret = 0; size_t len = 0; int ret; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) { aq_ret = -EINVAL; goto err; } len = virtchnl_struct_size(vfres, vsi_res, num_vsis); vfres = kzalloc(len, GFP_KERNEL); if (!vfres) { aq_ret = -ENOMEM; len = 0; goto err; } if (VF_IS_V11(&vf->vf_ver)) vf->driver_caps = *(u32 *)msg; else vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_REG | VIRTCHNL_VF_OFFLOAD_VLAN; vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi->info.pvid) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; if (i40e_vf_client_capable(pf, vf->vf_id) && (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) { vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA; set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states); } else { clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states); } if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; } else { if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; else vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; } if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; } if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) && (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) { if (pf->flags & I40E_FLAG_MFP_ENABLED) { dev_err(&pf->pdev->dev, "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", vf->vf_id); aq_ret = -EINVAL; goto err; } vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; 
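/* The remaining capability bits below follow the same negotiation
 * pattern: a feature is advertised back only if the VF requested it in
 * driver_caps (and, where relevant, the PF/HW side supports it).
 */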
} if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) { if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; } if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ; vfres->num_vsis = num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; vfres->max_mtu = i40e_vc_get_max_frame_size(vf); if (vf->lan_vsi_idx) { vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; /* VFs only use TC 0 */ vfres->vsi_res[0].qset_handle = le16_to_cpu(vsi->info.qs_handle[0]); if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) { i40e_del_mac_filter(vsi, vf->default_lan_addr.addr); eth_zero_addr(vf->default_lan_addr.addr); } ether_addr_copy(vfres->vsi_res[0].default_mac_addr, vf->default_lan_addr.addr); } set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); err: /* send the response back to the VF */ ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret, (u8 *)vfres, len); kfree(vfres); return ret; } /** * i40e_vc_config_promiscuous_mode_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * called from the VF to configure the promiscuous mode of * VF vsis **/ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_promisc_info *info = (struct virtchnl_promisc_info *)msg; struct i40e_pf *pf = vf->pf; bool allmulti = false; bool alluni = false; int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = -EINVAL; goto err_out; } if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { dev_err(&pf->pdev->dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n", vf->vf_id); /* Lie to the VF on purpose, because this is an error we can * ignore. Unprivileged VF is not a virtual channel error. 
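 * (aq_ret stays 0 below, so the VF sees success while its promiscuous
 * request is silently ignored.)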
*/ aq_ret = 0; goto err_out; } if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) { aq_ret = -EINVAL; goto err_out; } if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { aq_ret = -EINVAL; goto err_out; } /* Multicast promiscuous handling*/ if (info->flags & FLAG_VF_MULTICAST_PROMISC) allmulti = true; if (info->flags & FLAG_VF_UNICAST_PROMISC) alluni = true; aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, alluni); if (aq_ret) goto err_out; if (allmulti) { if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) dev_info(&pf->pdev->dev, "VF %d successfully set multicast promiscuous mode\n", vf->vf_id); } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) dev_info(&pf->pdev->dev, "VF %d successfully unset multicast promiscuous mode\n", vf->vf_id); if (alluni) { if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) dev_info(&pf->pdev->dev, "VF %d successfully set unicast promiscuous mode\n", vf->vf_id); } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) dev_info(&pf->pdev->dev, "VF %d successfully unset unicast promiscuous mode\n", vf->vf_id); err_out: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, aq_ret); } /** * i40e_vc_config_queues_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * called from the VF to configure the rx/tx * queues **/ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_vsi_queue_config_info *qci = (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; u16 vsi_id, vsi_queue_id = 0; struct i40e_pf *pf = vf->pf; int i, j = 0, idx = 0; struct i40e_vsi *vsi; u16 num_qps_all = 0; int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = -EINVAL; goto error_param; } if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) { aq_ret = -EINVAL; goto error_param; } if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) { aq_ret = -EINVAL; goto error_param; } if (vf->adq_enabled) { for (i = 0; i < vf->num_tc; i++) num_qps_all += vf->ch[i].num_qps; if (num_qps_all != qci->num_queue_pairs) { aq_ret = -EINVAL; goto error_param; } } vsi_id = qci->vsi_id; for (i = 0; i < qci->num_queue_pairs; i++) { qpi = &qci->qpair[i]; if (!vf->adq_enabled) { if (!i40e_vc_isvalid_queue_id(vf, vsi_id, qpi->txq.queue_id)) { aq_ret = -EINVAL; goto error_param; } vsi_queue_id = qpi->txq.queue_id; if (qpi->txq.vsi_id != qci->vsi_id || qpi->rxq.vsi_id != qci->vsi_id || qpi->rxq.queue_id != vsi_queue_id) { aq_ret = -EINVAL; goto error_param; } } if (vf->adq_enabled) { if (idx >= ARRAY_SIZE(vf->ch)) { aq_ret = -ENODEV; goto error_param; } vsi_id = vf->ch[idx].vsi_id; } if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, &qpi->rxq) || i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, &qpi->txq)) { aq_ret = -EINVAL; goto error_param; } /* For ADq there can be up to 4 VSIs with max 4 queues each. * VF does not know about these additional VSIs and all * it cares is about its own queues. 
PF configures these queues * to its appropriate VSIs based on TC mapping */ if (vf->adq_enabled) { if (idx >= ARRAY_SIZE(vf->ch)) { aq_ret = -ENODEV; goto error_param; } if (j == (vf->ch[idx].num_qps - 1)) { idx++; j = 0; /* resetting the queue count */ vsi_queue_id = 0; } else { j++; vsi_queue_id++; } } } /* set vsi num_queue_pairs in use to num configured by VF */ if (!vf->adq_enabled) { pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs; } else { for (i = 0; i < vf->num_tc; i++) { vsi = pf->vsi[vf->ch[i].vsi_idx]; vsi->num_queue_pairs = vf->ch[i].num_qps; if (i40e_update_adq_vsi_queues(vsi, i)) { aq_ret = -EIO; goto error_param; } } } error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret); } /** * i40e_validate_queue_map - check queue map is valid * @vf: the VF structure pointer * @vsi_id: vsi id * @queuemap: Tx or Rx queue map * * check if Tx or Rx queue map is valid **/ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, unsigned long queuemap) { u16 vsi_queue_id, queue_id; for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) { if (vf->adq_enabled) { vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id; queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF); } else { queue_id = vsi_queue_id; } if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) return -EINVAL; } return 0; } /** * i40e_vc_config_irq_map_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * called from the VF to configure the irq to * queue map **/ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_irq_map_info *irqmap_info = (struct virtchnl_irq_map_info *)msg; struct virtchnl_vector_map *map; int aq_ret = 0; u16 vsi_id; int i; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = -EINVAL; goto error_param; } if (irqmap_info->num_vectors > vf->pf->hw.func_caps.num_msix_vectors_vf) { aq_ret = -EINVAL; goto error_param; } for (i = 0; i < irqmap_info->num_vectors; i++) { map = &irqmap_info->vecmap[i]; /* validate msg params */ if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) || !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) { aq_ret = -EINVAL; goto error_param; } vsi_id = map->vsi_id; if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) { aq_ret = -EINVAL; goto error_param; } if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) { aq_ret = -EINVAL; goto error_param; } i40e_config_irq_link_list(vf, vsi_id, map); } error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret); } /** * i40e_ctrl_vf_tx_rings * @vsi: the SRIOV VSI being configured * @q_map: bit map of the queues to be enabled * @enable: start or stop the queue **/ static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map, bool enable) { struct i40e_pf *pf = vsi->back; int ret = 0; u16 q_id; for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { ret = i40e_control_wait_tx_q(vsi->seid, pf, vsi->base_queue + q_id, false /*is xdp*/, enable); if (ret) break; } return ret; } /** * i40e_ctrl_vf_rx_rings * @vsi: the SRIOV VSI being configured * @q_map: bit map of the queues to be enabled * @enable: start or stop the queue **/ static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, bool enable) { struct i40e_pf *pf = vsi->back; int ret = 0; u16 q_id; for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id, enable); if (ret) break; } return ret; } /** * 
i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTHCHNL * @vqs: virtchnl_queue_select structure containing bitmaps to validate * * Returns true if validation was successful, else false. */ static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs) { if ((!vqs->rx_queues && !vqs->tx_queues) || vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) || vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES)) return false; return true; } /** * i40e_vc_enable_queues_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * called from the VF to enable all or specific queue(s) **/ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; struct i40e_pf *pf = vf->pf; int aq_ret = 0; int i; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = -EINVAL; goto error_param; } if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { aq_ret = -EINVAL; goto error_param; } if (!i40e_vc_validate_vqs_bitmaps(vqs)) { aq_ret = -EINVAL; goto error_param; } /* Use the queue bit map sent by the VF */ if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, true)) { aq_ret = -EIO; goto error_param; } if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, true)) { aq_ret = -EIO; goto error_param; } /* need to start the rings for additional ADq VSI's as well */ if (vf->adq_enabled) { /* zero belongs to LAN VSI */ for (i = 1; i < vf->num_tc; i++) { if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx])) aq_ret = -EIO; } } error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret); } /** * i40e_vc_disable_queues_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * called from the VF to disable all or specific * queue(s) **/ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; struct i40e_pf *pf = vf->pf; int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = -EINVAL; goto error_param; } if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { aq_ret = -EINVAL; goto error_param; } if (!i40e_vc_validate_vqs_bitmaps(vqs)) { aq_ret = -EINVAL; goto error_param; } /* Use the queue bit map sent by the VF */ if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, false)) { aq_ret = -EIO; goto error_param; } if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, false)) { aq_ret = -EIO; goto error_param; } error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret); } /** * i40e_check_enough_queue - find big enough queue number * @vf: pointer to the VF info * @needed: the number of items needed * * Returns the base item index of the queue, or negative for error **/ static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed) { unsigned int i, cur_queues, more, pool_size; struct i40e_lump_tracking *pile; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi; vsi = pf->vsi[vf->lan_vsi_idx]; cur_queues = vsi->alloc_queue_pairs; /* if current allocated queues are enough for need */ if (cur_queues >= needed) return vsi->base_queue; pile = pf->qp_pile; if (cur_queues > 0) { /* if the allocated queues are not zero * just check if there are enough queues for more * behind the allocated queues. 
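 * In other words, check that the 'needed - cur_queues' entries directly
 * after this VSI's range in pf->qp_pile are still free (no
 * I40E_PILE_VALID_BIT set), which is what the scan below does.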
*/ more = needed - cur_queues; for (i = vsi->base_queue + cur_queues; i < pile->num_entries; i++) { if (pile->list[i] & I40E_PILE_VALID_BIT) break; if (more-- == 1) /* there is enough */ return vsi->base_queue; } } pool_size = 0; for (i = 0; i < pile->num_entries; i++) { if (pile->list[i] & I40E_PILE_VALID_BIT) { pool_size = 0; continue; } if (needed <= ++pool_size) /* there is enough */ return i; } return -ENOMEM; } /** * i40e_vc_request_queues_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * VFs get a default number of queues but can use this message to request a * different number. If the request is successful, PF will reset the VF and * return 0. If unsuccessful, PF will send message informing VF of number of * available queues and return result of sending VF a message. **/ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_vf_res_request *vfres = (struct virtchnl_vf_res_request *)msg; u16 req_pairs = vfres->num_queue_pairs; u8 cur_pairs = vf->num_queue_pairs; struct i40e_pf *pf = vf->pf; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) return -EINVAL; if (req_pairs > I40E_MAX_VF_QUEUES) { dev_err(&pf->pdev->dev, "VF %d tried to request more than %d queues.\n", vf->vf_id, I40E_MAX_VF_QUEUES); vfres->num_queue_pairs = I40E_MAX_VF_QUEUES; } else if (req_pairs - cur_pairs > pf->queues_left) { dev_warn(&pf->pdev->dev, "VF %d requested %d more queues, but only %d left.\n", vf->vf_id, req_pairs - cur_pairs, pf->queues_left); vfres->num_queue_pairs = pf->queues_left + cur_pairs; } else if (i40e_check_enough_queue(vf, req_pairs) < 0) { dev_warn(&pf->pdev->dev, "VF %d requested %d more queues, but there is not enough for it.\n", vf->vf_id, req_pairs - cur_pairs); vfres->num_queue_pairs = cur_pairs; } else { /* successful request */ vf->num_req_queues = req_pairs; i40e_vc_reset_vf(vf, true); return 0; } return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, (u8 *)vfres, sizeof(*vfres)); } /** * i40e_vc_get_stats_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * called from the VF to get vsi stats **/ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; struct i40e_pf *pf = vf->pf; struct i40e_eth_stats stats; int aq_ret = 0; struct i40e_vsi *vsi; memset(&stats, 0, sizeof(struct i40e_eth_stats)); if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = -EINVAL; goto error_param; } if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { aq_ret = -EINVAL; goto error_param; } vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { aq_ret = -EINVAL; goto error_param; } i40e_update_eth_stats(vsi); stats = vsi->eth_stats; error_param: /* send the response back to the VF */ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, (u8 *)&stats, sizeof(stats)); } #define I40E_MAX_MACVLAN_PER_HW 3072 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \ (num_ports)) /* If the VF is not trusted restrict the number of MAC/VLAN it can program * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast */ #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1) #define I40E_VC_MAX_VLAN_PER_VF 16 #define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports) \ ({ typeof(vf_num) vf_num_ = (vf_num); \ typeof(num_ports) num_ports_ = (num_ports); \ ((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ * \ I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) + \ I40E_VC_MAX_MAC_ADDR_PER_VF; }) /** * i40e_check_vf_permission * @vf: pointer to the VF info * @al: 
MAC address list from virtchnl * * Check that the given list of MAC addresses is allowed. Will return -EPERM * if any address in the list is not valid. Checks the following conditions: * * 1) broadcast and zero addresses are never valid * 2) unicast addresses are not allowed if the VMM has administratively set * the VF MAC address, unless the VF is marked as privileged. * 3) There is enough space to add all the addresses. * * Note that to guarantee consistency, it is expected this function be called * while holding the mac_filter_hash_lock, as otherwise the current number of * addresses might not be accurate. **/ static inline int i40e_check_vf_permission(struct i40e_vf *vf, struct virtchnl_ether_addr_list *al) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; struct i40e_hw *hw = &pf->hw; int mac2add_cnt = 0; int i; for (i = 0; i < al->num_elements; i++) { struct i40e_mac_filter *f; u8 *addr = al->list[i].addr; if (is_broadcast_ether_addr(addr) || is_zero_ether_addr(addr)) { dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", addr); return -EINVAL; } /* If the host VMM administrator has set the VF MAC address * administratively via the ndo_set_vf_mac command then deny * permission to the VF to add or delete unicast MAC addresses. * Unless the VF is privileged and then it can do whatever. * The VF may request to set the MAC address filter already * assigned to it so do not return an error in that case. */ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && !is_multicast_ether_addr(addr) && vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); return -EPERM; } /*count filters that really will be added*/ f = i40e_find_mac(vsi, addr); if (!f) ++mac2add_cnt; } /* If this VF is not privileged, then we can't add more than a limited * number of addresses. Check to make sure that the additions do not * push us over the limit. */ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { if ((i40e_count_filters(vsi) + mac2add_cnt) > I40E_VC_MAX_MAC_ADDR_PER_VF) { dev_err(&pf->pdev->dev, "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); return -EPERM; } /* If this VF is trusted, it can use more resources than untrusted. * However to ensure that every trusted VF has appropriate number of * resources, divide whole pool of resources per port and then across * all VFs. 
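 * With the macros above this works out to roughly
 *   (I40E_MAX_MACVLAN_PER_HW / num_ports - num_vfs * 18) / num_vfs + 18
 * filters per trusted VF, where 18 is I40E_VC_MAX_MAC_ADDR_PER_VF
 * (16 multicast + 1 MAC + 1 broadcast).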
*/ } else { if ((i40e_count_filters(vsi) + mac2add_cnt) > I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports)) { dev_err(&pf->pdev->dev, "Cannot add more MAC addresses, trusted VF exhausted it's resources\n"); return -EPERM; } } return 0; } /** * i40e_vc_ether_addr_type - get type of virtchnl_ether_addr * @vc_ether_addr: used to extract the type **/ static u8 i40e_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr) { return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK; } /** * i40e_is_vc_addr_legacy * @vc_ether_addr: VIRTCHNL structure that contains MAC and type * * check if the MAC address is from an older VF **/ static bool i40e_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr) { return i40e_vc_ether_addr_type(vc_ether_addr) == VIRTCHNL_ETHER_ADDR_LEGACY; } /** * i40e_is_vc_addr_primary * @vc_ether_addr: VIRTCHNL structure that contains MAC and type * * check if the MAC address is the VF's primary MAC * This function should only be called when the MAC address in * virtchnl_ether_addr is a valid unicast MAC **/ static bool i40e_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr) { return i40e_vc_ether_addr_type(vc_ether_addr) == VIRTCHNL_ETHER_ADDR_PRIMARY; } /** * i40e_update_vf_mac_addr * @vf: VF to update * @vc_ether_addr: structure from VIRTCHNL with MAC to add * * update the VF's cached hardware MAC if allowed **/ static void i40e_update_vf_mac_addr(struct i40e_vf *vf, struct virtchnl_ether_addr *vc_ether_addr) { u8 *mac_addr = vc_ether_addr->addr; if (!is_valid_ether_addr(mac_addr)) return; /* If request to add MAC filter is a primary request update its default * MAC address with the requested one. If it is a legacy request then * check if current default is empty if so update the default MAC */ if (i40e_is_vc_addr_primary(vc_ether_addr)) { ether_addr_copy(vf->default_lan_addr.addr, mac_addr); } else if (i40e_is_vc_addr_legacy(vc_ether_addr)) { if (is_zero_ether_addr(vf->default_lan_addr.addr)) ether_addr_copy(vf->default_lan_addr.addr, mac_addr); } } /** * i40e_vc_add_mac_addr_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * add guest mac address filter **/ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_ether_addr_list *al = (struct virtchnl_ether_addr_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; int ret = 0; int i; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { ret = -EINVAL; goto error_param; } vsi = pf->vsi[vf->lan_vsi_idx]; /* Lock once, because all function inside for loop accesses VSI's * MAC filter list which needs to be protected using same lock. 
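 * (mac_filter_hash_lock covers both the i40e_check_vf_permission() call
 * and the i40e_find_mac()/i40e_add_mac_filter() calls in the loop below.)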
*/ spin_lock_bh(&vsi->mac_filter_hash_lock); ret = i40e_check_vf_permission(vf, al); if (ret) { spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; } /* add new addresses to the list */ for (i = 0; i < al->num_elements; i++) { struct i40e_mac_filter *f; f = i40e_find_mac(vsi, al->list[i].addr); if (!f) { f = i40e_add_mac_filter(vsi, al->list[i].addr); if (!f) { dev_err(&pf->pdev->dev, "Unable to add MAC filter %pM for VF %d\n", al->list[i].addr, vf->vf_id); ret = -EINVAL; spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; } } i40e_update_vf_mac_addr(vf, &al->list[i]); } spin_unlock_bh(&vsi->mac_filter_hash_lock); /* program the updated filter list */ ret = i40e_sync_vsi_filters(vsi); if (ret) dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", vf->vf_id, ret); error_param: /* send the response to the VF */ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, ret, NULL, 0); } /** * i40e_vc_del_mac_addr_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * remove guest mac address filter **/ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_ether_addr_list *al = (struct virtchnl_ether_addr_list *)msg; bool was_unimac_deleted = false; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; int ret = 0; int i; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { ret = -EINVAL; goto error_param; } for (i = 0; i < al->num_elements; i++) { if (is_broadcast_ether_addr(al->list[i].addr) || is_zero_ether_addr(al->list[i].addr)) { dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", al->list[i].addr, vf->vf_id); ret = -EINVAL; goto error_param; } if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr)) was_unimac_deleted = true; } vsi = pf->vsi[vf->lan_vsi_idx]; spin_lock_bh(&vsi->mac_filter_hash_lock); /* delete addresses from the list */ for (i = 0; i < al->num_elements; i++) if (i40e_del_mac_filter(vsi, al->list[i].addr)) { ret = -EINVAL; spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; } spin_unlock_bh(&vsi->mac_filter_hash_lock); if (was_unimac_deleted) eth_zero_addr(vf->default_lan_addr.addr); /* program the updated filter list */ ret = i40e_sync_vsi_filters(vsi); if (ret) dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n", vf->vf_id, ret); if (vf->trusted && was_unimac_deleted) { struct i40e_mac_filter *f; struct hlist_node *h; u8 *macaddr = NULL; int bkt; /* set last unicast mac address as default */ spin_lock_bh(&vsi->mac_filter_hash_lock); hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (is_valid_ether_addr(f->macaddr)) macaddr = f->macaddr; } if (macaddr) ether_addr_copy(vf->default_lan_addr.addr, macaddr); spin_unlock_bh(&vsi->mac_filter_hash_lock); } error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret); } /** * i40e_vc_add_vlan_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * program guest vlan id **/ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_vlan_filter_list *vfl = (struct virtchnl_vlan_filter_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; int aq_ret = 0; int i; if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { dev_err(&pf->pdev->dev, "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); goto error_param; } if (!test_bit(I40E_VF_STATE_ACTIVE, 
&vf->vf_states) || !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { aq_ret = -EINVAL; goto error_param; } for (i = 0; i < vfl->num_elements; i++) { if (vfl->vlan_id[i] > I40E_MAX_VLANID) { aq_ret = -EINVAL; dev_err(&pf->pdev->dev, "invalid VF VLAN id %d\n", vfl->vlan_id[i]); goto error_param; } } vsi = pf->vsi[vf->lan_vsi_idx]; if (vsi->info.pvid) { aq_ret = -EINVAL; goto error_param; } i40e_vlan_stripping_enable(vsi); for (i = 0; i < vfl->num_elements; i++) { /* add new VLAN filter */ int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); if (!ret) vf->num_vlan++; if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, true, vfl->vlan_id[i], NULL); if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, true, vfl->vlan_id[i], NULL); if (ret) dev_err(&pf->pdev->dev, "Unable to add VLAN filter %d for VF %d, error %d\n", vfl->vlan_id[i], vf->vf_id, ret); } error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret); } /** * i40e_vc_remove_vlan_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * remove programmed guest vlan id **/ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_vlan_filter_list *vfl = (struct virtchnl_vlan_filter_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; int aq_ret = 0; int i; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { aq_ret = -EINVAL; goto error_param; } for (i = 0; i < vfl->num_elements; i++) { if (vfl->vlan_id[i] > I40E_MAX_VLANID) { aq_ret = -EINVAL; goto error_param; } } vsi = pf->vsi[vf->lan_vsi_idx]; if (vsi->info.pvid) { if (vfl->num_elements > 1 || vfl->vlan_id[0]) aq_ret = -EINVAL; goto error_param; } for (i = 0; i < vfl->num_elements; i++) { i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); vf->num_vlan--; if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, false, vfl->vlan_id[i], NULL); if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, false, vfl->vlan_id[i], NULL); } error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret); } /** * i40e_vc_rdma_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * * called from the VF for the iwarp msgs **/ static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { struct i40e_pf *pf = vf->pf; int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; int aq_ret = 0; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { aq_ret = -EINVAL; goto error_param; } i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id, msg, msglen); error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA, aq_ret); } /** * i40e_vc_rdma_qvmap_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @config: config qvmap or release it * * called from the VF for the iwarp msgs **/ static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) { struct virtchnl_rdma_qvlist_info *qvlist_info = (struct virtchnl_rdma_qvlist_info *)msg; int aq_ret = 0; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { aq_ret = -EINVAL; goto error_param; } if (config) { if (i40e_config_rdma_qvlist(vf, 
qvlist_info)) aq_ret = -EINVAL; } else { i40e_release_rdma_qvlist(vf); } error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP : VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP, aq_ret); } /** * i40e_vc_config_rss_key * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * Configure the VF's RSS key **/ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) { struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || vrk->key_len != I40E_HKEY_ARRAY_SIZE) { aq_ret = -EINVAL; goto err; } vsi = pf->vsi[vf->lan_vsi_idx]; aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); err: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret); } /** * i40e_vc_config_rss_lut * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * Configure the VF's RSS LUT **/ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) { struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; int aq_ret = 0; u16 i; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) || vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) { aq_ret = -EINVAL; goto err; } for (i = 0; i < vrl->lut_entries; i++) if (vrl->lut[i] >= vf->num_queue_pairs) { aq_ret = -EINVAL; goto err; } vsi = pf->vsi[vf->lan_vsi_idx]; aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); /* send the response to the VF */ err: return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret); } /** * i40e_vc_get_rss_hena * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * Return the RSS HENA bits allowed by the hardware **/ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) { struct virtchnl_rss_hena *vrh = NULL; struct i40e_pf *pf = vf->pf; int aq_ret = 0; int len = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = -EINVAL; goto err; } len = sizeof(struct virtchnl_rss_hena); vrh = kzalloc(len, GFP_KERNEL); if (!vrh) { aq_ret = -ENOMEM; len = 0; goto err; } vrh->hena = i40e_pf_get_default_rss_hena(pf); err: /* send the response back to the VF */ aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, aq_ret, (u8 *)vrh, len); kfree(vrh); return aq_ret; } /** * i40e_vc_set_rss_hena * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * Set the RSS HENA bits for the VF **/ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) { struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = -EINVAL; goto err; } i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena); i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(vrh->hena >> 32)); /* send the response to the VF */ err: return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret); } /** * i40e_vc_enable_vlan_stripping * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * Enable vlan header stripping for the VF **/ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) { struct i40e_vsi *vsi; int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = -EINVAL; goto err; } vsi = vf->pf->vsi[vf->lan_vsi_idx]; 
i40e_vlan_stripping_enable(vsi); /* send the response to the VF */ err: return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, aq_ret); } /** * i40e_vc_disable_vlan_stripping * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * Disable vlan header stripping for the VF **/ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) { struct i40e_vsi *vsi; int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = -EINVAL; goto err; } vsi = vf->pf->vsi[vf->lan_vsi_idx]; i40e_vlan_stripping_disable(vsi); /* send the response to the VF */ err: return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, aq_ret); } /** * i40e_validate_cloud_filter * @vf: pointer to VF structure * @tc_filter: pointer to filter requested * * This function validates cloud filter programmed as TC filter for ADq **/ static int i40e_validate_cloud_filter(struct i40e_vf *vf, struct virtchnl_filter *tc_filter) { struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec; struct virtchnl_l4_spec data = tc_filter->data.tcp_spec; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; struct i40e_mac_filter *f; struct hlist_node *h; bool found = false; int bkt; if (!tc_filter->action) { dev_info(&pf->pdev->dev, "VF %d: Currently ADq doesn't support Drop Action\n", vf->vf_id); goto err; } /* action_meta is TC number here to which the filter is applied */ if (!tc_filter->action_meta || tc_filter->action_meta > I40E_MAX_VF_VSI) { dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", vf->vf_id, tc_filter->action_meta); goto err; } /* Check filter if it's programmed for advanced mode or basic mode. * There are two ADq modes (for VF only), * 1. Basic mode: intended to allow as many filter options as possible * to be added to a VF in Non-trusted mode. Main goal is * to add filters to its own MAC and VLAN id. * 2. Advanced mode: is for allowing filters to be applied other than * its own MAC or VLAN. This mode requires the VF to be * Trusted. 
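 * The check below treats a filter as basic mode when only a destination
 * MAC (optionally plus a VLAN) is matched, i.e. mask.dst_mac is set and
 * mask.dst_ip is not; any other combination falls through to the trust
 * check.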
*/ if (mask.dst_mac[0] && !mask.dst_ip[0]) { vsi = pf->vsi[vf->lan_vsi_idx]; f = i40e_find_mac(vsi, data.dst_mac); if (!f) { dev_info(&pf->pdev->dev, "Destination MAC %pM doesn't belong to VF %d\n", data.dst_mac, vf->vf_id); goto err; } if (mask.vlan_id) { hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (f->vlan == ntohs(data.vlan_id)) { found = true; break; } } if (!found) { dev_info(&pf->pdev->dev, "VF %d doesn't have any VLAN id %u\n", vf->vf_id, ntohs(data.vlan_id)); goto err; } } } else { /* Check if VF is trusted */ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { dev_err(&pf->pdev->dev, "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n", vf->vf_id); return -EIO; } } if (mask.dst_mac[0] & data.dst_mac[0]) { if (is_broadcast_ether_addr(data.dst_mac) || is_zero_ether_addr(data.dst_mac)) { dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n", vf->vf_id, data.dst_mac); goto err; } } if (mask.src_mac[0] & data.src_mac[0]) { if (is_broadcast_ether_addr(data.src_mac) || is_zero_ether_addr(data.src_mac)) { dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n", vf->vf_id, data.src_mac); goto err; } } if (mask.dst_port & data.dst_port) { if (!data.dst_port) { dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n", vf->vf_id); goto err; } } if (mask.src_port & data.src_port) { if (!data.src_port) { dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n", vf->vf_id); goto err; } } if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW && tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) { dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n", vf->vf_id); goto err; } if (mask.vlan_id & data.vlan_id) { if (ntohs(data.vlan_id) > I40E_MAX_VLANID) { dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n", vf->vf_id); goto err; } } return 0; err: return -EIO; } /** * i40e_find_vsi_from_seid - searches for the vsi with the given seid * @vf: pointer to the VF info * @seid: seid of the vsi it is searching for **/ static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; int i; for (i = 0; i < vf->num_tc ; i++) { vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id); if (vsi && vsi->seid == seid) return vsi; } return NULL; } /** * i40e_del_all_cloud_filters * @vf: pointer to the VF info * * This function deletes all cloud filters **/ static void i40e_del_all_cloud_filters(struct i40e_vf *vf) { struct i40e_cloud_filter *cfilter = NULL; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; struct hlist_node *node; int ret; hlist_for_each_entry_safe(cfilter, node, &vf->cloud_filter_list, cloud_node) { vsi = i40e_find_vsi_from_seid(vf, cfilter->seid); if (!vsi) { dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n", vf->vf_id, cfilter->seid); continue; } if (cfilter->dst_port) ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, false); else ret = i40e_add_del_cloud_filter(vsi, cfilter, false); if (ret) dev_err(&pf->pdev->dev, "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n", vf->vf_id, ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); hlist_del(&cfilter->cloud_node); kfree(cfilter); vf->num_cloud_filters--; } } /** * i40e_vc_del_cloud_filter * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * This function deletes a cloud filter programmed as TC filter for ADq **/ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) { struct virtchnl_filter *vcf = (struct virtchnl_filter 
*)msg; struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; struct i40e_cloud_filter cfilter, *cf = NULL; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; struct hlist_node *node; int aq_ret = 0; int i, ret; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = -EINVAL; goto err; } if (!vf->adq_enabled) { dev_info(&pf->pdev->dev, "VF %d: ADq not enabled, can't apply cloud filter\n", vf->vf_id); aq_ret = -EINVAL; goto err; } if (i40e_validate_cloud_filter(vf, vcf)) { dev_info(&pf->pdev->dev, "VF %d: Invalid input, can't apply cloud filter\n", vf->vf_id); aq_ret = -EINVAL; goto err; } memset(&cfilter, 0, sizeof(cfilter)); /* parse destination mac address */ for (i = 0; i < ETH_ALEN; i++) cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; /* parse source mac address */ for (i = 0; i < ETH_ALEN; i++) cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; cfilter.vlan_id = mask.vlan_id & tcf.vlan_id; cfilter.dst_port = mask.dst_port & tcf.dst_port; cfilter.src_port = mask.src_port & tcf.src_port; switch (vcf->flow_type) { case VIRTCHNL_TCP_V4_FLOW: cfilter.n_proto = ETH_P_IP; if (mask.dst_ip[0] & tcf.dst_ip[0]) memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip, ARRAY_SIZE(tcf.dst_ip)); else if (mask.src_ip[0] & tcf.dst_ip[0]) memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip, ARRAY_SIZE(tcf.dst_ip)); break; case VIRTCHNL_TCP_V6_FLOW: cfilter.n_proto = ETH_P_IPV6; if (mask.dst_ip[3] & tcf.dst_ip[3]) memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip, sizeof(cfilter.ip.v6.dst_ip6)); if (mask.src_ip[3] & tcf.src_ip[3]) memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip, sizeof(cfilter.ip.v6.src_ip6)); break; default: /* TC filter can be configured based on different combinations * and in this case IP is not a part of filter config */ dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", vf->vf_id); } /* get the vsi to which the tc belongs to */ vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; cfilter.seid = vsi->seid; cfilter.flags = vcf->field_flags; /* Deleting TC filter */ if (tcf.dst_port) ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false); else ret = i40e_add_del_cloud_filter(vsi, &cfilter, false); if (ret) { dev_err(&pf->pdev->dev, "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n", vf->vf_id, ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto err; } hlist_for_each_entry_safe(cf, node, &vf->cloud_filter_list, cloud_node) { if (cf->seid != cfilter.seid) continue; if (mask.dst_port) if (cfilter.dst_port != cf->dst_port) continue; if (mask.dst_mac[0]) if (!ether_addr_equal(cf->src_mac, cfilter.src_mac)) continue; /* for ipv4 data to be valid, only first byte of mask is set */ if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0]) if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip, ARRAY_SIZE(tcf.dst_ip))) continue; /* for ipv6, mask is set for all sixteen bytes (4 words) */ if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3]) if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6, sizeof(cfilter.ip.v6.src_ip6))) continue; if (mask.vlan_id) if (cfilter.vlan_id != cf->vlan_id) continue; hlist_del(&cf->cloud_node); kfree(cf); vf->num_cloud_filters--; } err: return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER, aq_ret); } /** * i40e_vc_add_cloud_filter * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * This function adds a cloud filter programmed as TC filter for ADq **/ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) { struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 
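/* Every field of the cloud filter below is taken as (mask & data) from
 * the virtchnl message, so only the portions the VF actually masked in
 * are programmed into the hardware filter.
 */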
struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; struct i40e_cloud_filter *cfilter = NULL; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; int aq_ret = 0; int i, ret; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = -EINVAL; goto err_out; } if (!vf->adq_enabled) { dev_info(&pf->pdev->dev, "VF %d: ADq is not enabled, can't apply cloud filter\n", vf->vf_id); aq_ret = -EINVAL; goto err_out; } if (i40e_validate_cloud_filter(vf, vcf)) { dev_info(&pf->pdev->dev, "VF %d: Invalid input/s, can't apply cloud filter\n", vf->vf_id); aq_ret = -EINVAL; goto err_out; } cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); if (!cfilter) return -ENOMEM; /* parse destination mac address */ for (i = 0; i < ETH_ALEN; i++) cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; /* parse source mac address */ for (i = 0; i < ETH_ALEN; i++) cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; cfilter->vlan_id = mask.vlan_id & tcf.vlan_id; cfilter->dst_port = mask.dst_port & tcf.dst_port; cfilter->src_port = mask.src_port & tcf.src_port; switch (vcf->flow_type) { case VIRTCHNL_TCP_V4_FLOW: cfilter->n_proto = ETH_P_IP; if (mask.dst_ip[0] & tcf.dst_ip[0]) memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip, ARRAY_SIZE(tcf.dst_ip)); else if (mask.src_ip[0] & tcf.dst_ip[0]) memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip, ARRAY_SIZE(tcf.dst_ip)); break; case VIRTCHNL_TCP_V6_FLOW: cfilter->n_proto = ETH_P_IPV6; if (mask.dst_ip[3] & tcf.dst_ip[3]) memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip, sizeof(cfilter->ip.v6.dst_ip6)); if (mask.src_ip[3] & tcf.src_ip[3]) memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip, sizeof(cfilter->ip.v6.src_ip6)); break; default: /* TC filter can be configured based on different combinations * and in this case IP is not a part of filter config */ dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", vf->vf_id); } /* get the VSI to which the TC belongs to */ vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; cfilter->seid = vsi->seid; cfilter->flags = vcf->field_flags; /* Adding cloud filter programmed as TC filter */ if (tcf.dst_port) ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); else ret = i40e_add_del_cloud_filter(vsi, cfilter, true); if (ret) { dev_err(&pf->pdev->dev, "VF %d: Failed to add cloud filter, err %pe aq_err %s\n", vf->vf_id, ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto err_free; } INIT_HLIST_NODE(&cfilter->cloud_node); hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list); /* release the pointer passing it to the collection */ cfilter = NULL; vf->num_cloud_filters++; err_free: kfree(cfilter); err_out: return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, aq_ret); } /** * i40e_vc_add_qch_msg: Add queue channel and enable ADq * @vf: pointer to the VF info * @msg: pointer to the msg buffer **/ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_tc_info *tci = (struct virtchnl_tc_info *)msg; struct i40e_pf *pf = vf->pf; struct i40e_link_status *ls = &pf->hw.phy.link_info; int i, adq_request_qps = 0; int aq_ret = 0; u64 speed = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = -EINVAL; goto err; } /* ADq cannot be applied if spoof check is ON */ if (vf->spoofchk) { dev_err(&pf->pdev->dev, "Spoof check is ON, turn it OFF to enable ADq\n"); aq_ret = -EINVAL; goto err; } if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) { dev_err(&pf->pdev->dev, "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n", 
vf->vf_id); aq_ret = -EINVAL; goto err; } /* max number of traffic classes for VF currently capped at 4 */ if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) { dev_err(&pf->pdev->dev, "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n", vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI); aq_ret = -EINVAL; goto err; } /* validate queues for each TC */ for (i = 0; i < tci->num_tc; i++) if (!tci->list[i].count || tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) { dev_err(&pf->pdev->dev, "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n", vf->vf_id, i, tci->list[i].count, I40E_DEFAULT_QUEUES_PER_VF); aq_ret = -EINVAL; goto err; } /* need Max VF queues but already have default number of queues */ adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF; if (pf->queues_left < adq_request_qps) { dev_err(&pf->pdev->dev, "No queues left to allocate to VF %d\n", vf->vf_id); aq_ret = -EINVAL; goto err; } else { /* we need to allocate max VF queues to enable ADq so as to * make sure ADq enabled VF always gets back queues when it * goes through a reset. */ vf->num_queue_pairs = I40E_MAX_VF_QUEUES; } /* get link speed in MB to validate rate limit */ speed = i40e_vc_link_speed2mbps(ls->link_speed); if (speed == SPEED_UNKNOWN) { dev_err(&pf->pdev->dev, "Cannot detect link speed\n"); aq_ret = -EINVAL; goto err; } /* parse data from the queue channel info */ vf->num_tc = tci->num_tc; for (i = 0; i < vf->num_tc; i++) { if (tci->list[i].max_tx_rate) { if (tci->list[i].max_tx_rate > speed) { dev_err(&pf->pdev->dev, "Invalid max tx rate %llu specified for VF %d.", tci->list[i].max_tx_rate, vf->vf_id); aq_ret = -EINVAL; goto err; } else { vf->ch[i].max_tx_rate = tci->list[i].max_tx_rate; } } vf->ch[i].num_qps = tci->list[i].count; } /* set this flag only after making sure all inputs are sane */ vf->adq_enabled = true; /* reset the VF in order to allocate resources */ i40e_vc_reset_vf(vf, true); return 0; /* send the response to the VF */ err: return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, aq_ret); } /** * i40e_vc_del_qch_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer **/ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) { struct i40e_pf *pf = vf->pf; int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = -EINVAL; goto err; } if (vf->adq_enabled) { i40e_del_all_cloud_filters(vf); i40e_del_qch(vf); vf->adq_enabled = false; vf->num_tc = 0; dev_info(&pf->pdev->dev, "Deleting Queue Channels and cloud filters for ADq on VF %d\n", vf->vf_id); } else { dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n", vf->vf_id); aq_ret = -EINVAL; } /* reset the VF in order to allocate resources */ i40e_vc_reset_vf(vf, true); return 0; err: return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, aq_ret); } /** * i40e_vc_process_vf_msg * @pf: pointer to the PF structure * @vf_id: source VF id * @v_opcode: operation code * @v_retval: unused return value code * @msg: pointer to the msg buffer * @msglen: msg length * * called from the common aeq/arq handler to * process request from VF **/ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, u32 __always_unused v_retval, u8 *msg, u16 msglen) { struct i40e_hw *hw = &pf->hw; int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; struct i40e_vf *vf; int ret; pf->vf_aq_requests++; if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs) return -EINVAL; vf = &(pf->vf[local_vf_id]); /* Check if VF is disabled. 
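 * (A disabled VF gets -EINVAL here with no virtchnl reply; failures in the
 * handlers below are reported back via i40e_vc_send_resp_to_vf().)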
*/ if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) return -EINVAL; /* perform basic checks on the msg */ ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); if (ret) { i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL); dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", local_vf_id, v_opcode, msglen); return ret; } switch (v_opcode) { case VIRTCHNL_OP_VERSION: ret = i40e_vc_get_version_msg(vf, msg); break; case VIRTCHNL_OP_GET_VF_RESOURCES: ret = i40e_vc_get_vf_resources_msg(vf, msg); i40e_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: i40e_vc_reset_vf(vf, false); ret = 0; break; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: ret = i40e_vc_config_promiscuous_mode_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_VSI_QUEUES: ret = i40e_vc_config_queues_msg(vf, msg); break; case VIRTCHNL_OP_CONFIG_IRQ_MAP: ret = i40e_vc_config_irq_map_msg(vf, msg); break; case VIRTCHNL_OP_ENABLE_QUEUES: ret = i40e_vc_enable_queues_msg(vf, msg); i40e_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_DISABLE_QUEUES: ret = i40e_vc_disable_queues_msg(vf, msg); break; case VIRTCHNL_OP_ADD_ETH_ADDR: ret = i40e_vc_add_mac_addr_msg(vf, msg); break; case VIRTCHNL_OP_DEL_ETH_ADDR: ret = i40e_vc_del_mac_addr_msg(vf, msg); break; case VIRTCHNL_OP_ADD_VLAN: ret = i40e_vc_add_vlan_msg(vf, msg); break; case VIRTCHNL_OP_DEL_VLAN: ret = i40e_vc_remove_vlan_msg(vf, msg); break; case VIRTCHNL_OP_GET_STATS: ret = i40e_vc_get_stats_msg(vf, msg); break; case VIRTCHNL_OP_RDMA: ret = i40e_vc_rdma_msg(vf, msg, msglen); break; case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: ret = i40e_vc_rdma_qvmap_msg(vf, msg, true); break; case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP: ret = i40e_vc_rdma_qvmap_msg(vf, msg, false); break; case VIRTCHNL_OP_CONFIG_RSS_KEY: ret = i40e_vc_config_rss_key(vf, msg); break; case VIRTCHNL_OP_CONFIG_RSS_LUT: ret = i40e_vc_config_rss_lut(vf, msg); break; case VIRTCHNL_OP_GET_RSS_HENA_CAPS: ret = i40e_vc_get_rss_hena(vf, msg); break; case VIRTCHNL_OP_SET_RSS_HENA: ret = i40e_vc_set_rss_hena(vf, msg); break; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: ret = i40e_vc_enable_vlan_stripping(vf, msg); break; case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: ret = i40e_vc_disable_vlan_stripping(vf, msg); break; case VIRTCHNL_OP_REQUEST_QUEUES: ret = i40e_vc_request_queues_msg(vf, msg); break; case VIRTCHNL_OP_ENABLE_CHANNELS: ret = i40e_vc_add_qch_msg(vf, msg); break; case VIRTCHNL_OP_DISABLE_CHANNELS: ret = i40e_vc_del_qch_msg(vf, msg); break; case VIRTCHNL_OP_ADD_CLOUD_FILTER: ret = i40e_vc_add_cloud_filter(vf, msg); break; case VIRTCHNL_OP_DEL_CLOUD_FILTER: ret = i40e_vc_del_cloud_filter(vf, msg); break; case VIRTCHNL_OP_UNKNOWN: default: dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", v_opcode, local_vf_id); ret = i40e_vc_send_resp_to_vf(vf, v_opcode, -EOPNOTSUPP); break; } return ret; } /** * i40e_vc_process_vflr_event * @pf: pointer to the PF structure * * called from the vlfr irq handler to * free up VF resources and state variables **/ int i40e_vc_process_vflr_event(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 reg, reg_idx, bit_idx; struct i40e_vf *vf; int vf_id; if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) return 0; /* Re-enable the VFLR interrupt cause here, before looking for which * VF got reset. Otherwise, if another VF gets a reset while the * first one is being processed, that interrupt will be lost, and * that VF will be stuck in reset forever. 
*/ reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); i40e_flush(hw); clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state); for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; /* read GLGEN_VFLRSTAT register to find out the flr VFs */ vf = &pf->vf[vf_id]; reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); if (reg & BIT(bit_idx)) /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */ i40e_reset_vf(vf, true); } return 0; } /** * i40e_validate_vf * @pf: the physical function * @vf_id: VF identifier * * Check that the VF is enabled and the VSI exists. * * Returns 0 on success, negative on failure **/ static int i40e_validate_vf(struct i40e_pf *pf, int vf_id) { struct i40e_vsi *vsi; struct i40e_vf *vf; int ret = 0; if (vf_id >= pf->num_alloc_vfs) { dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); ret = -EINVAL; goto err_out; } vf = &pf->vf[vf_id]; vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id); if (!vsi) ret = -EINVAL; err_out: return ret; } /** * i40e_check_vf_init_timeout * @vf: the virtual function * * Check that the VF's initialization was successfully done and if not * wait up to 300ms for its finish. * * Returns true when VF is initialized, false on timeout **/ static bool i40e_check_vf_init_timeout(struct i40e_vf *vf) { int i; /* When the VF is resetting wait until it is done. * It can take up to 200 milliseconds, but wait for * up to 300 milliseconds to be safe. */ for (i = 0; i < 15; i++) { if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) return true; msleep(20); } if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&vf->pf->pdev->dev, "VF %d still in reset. Try again.\n", vf->vf_id); return false; } return true; } /** * i40e_ndo_set_vf_mac * @netdev: network interface device structure * @vf_id: VF identifier * @mac: mac address * * program VF mac address **/ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_mac_filter *f; struct i40e_vf *vf; int ret = 0; struct hlist_node *h; int bkt; if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); return -EAGAIN; } /* validate the request */ ret = i40e_validate_vf(pf, vf_id); if (ret) goto error_param; vf = &pf->vf[vf_id]; if (!i40e_check_vf_init_timeout(vf)) { ret = -EAGAIN; goto error_param; } vsi = pf->vsi[vf->lan_vsi_idx]; if (is_multicast_ether_addr(mac)) { dev_err(&pf->pdev->dev, "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); ret = -EINVAL; goto error_param; } /* Lock once because below invoked function add/del_filter requires * mac_filter_hash_lock to be held */ spin_lock_bh(&vsi->mac_filter_hash_lock); /* delete the temporary mac address */ if (!is_zero_ether_addr(vf->default_lan_addr.addr)) i40e_del_mac_filter(vsi, vf->default_lan_addr.addr); /* Delete all the filters for this VSI - we're going to kill it * anyway. 
*/ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) __i40e_del_filter(vsi, f); spin_unlock_bh(&vsi->mac_filter_hash_lock); /* program mac filter */ if (i40e_sync_vsi_filters(vsi)) { dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); ret = -EIO; goto error_param; } ether_addr_copy(vf->default_lan_addr.addr, mac); if (is_zero_ether_addr(mac)) { vf->pf_set_mac = false; dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id); } else { vf->pf_set_mac = true; dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); } /* Force the VF interface down so it has to bring up with new MAC * address */ i40e_vc_reset_vf(vf, true); dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n"); error_param: clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } /** * i40e_ndo_set_vf_port_vlan * @netdev: network interface device structure * @vf_id: VF identifier * @vlan_id: mac address * @qos: priority setting * @vlan_proto: vlan protocol * * program VF vlan id and/or qos **/ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, __be16 vlan_proto) { u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT); struct i40e_netdev_priv *np = netdev_priv(netdev); bool allmulti = false, alluni = false; struct i40e_pf *pf = np->vsi->back; struct i40e_vsi *vsi; struct i40e_vf *vf; int ret = 0; if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); return -EAGAIN; } /* validate the request */ ret = i40e_validate_vf(pf, vf_id); if (ret) goto error_pvid; if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) { dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); ret = -EINVAL; goto error_pvid; } if (vlan_proto != htons(ETH_P_8021Q)) { dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n"); ret = -EPROTONOSUPPORT; goto error_pvid; } vf = &pf->vf[vf_id]; if (!i40e_check_vf_init_timeout(vf)) { ret = -EAGAIN; goto error_pvid; } vsi = pf->vsi[vf->lan_vsi_idx]; if (le16_to_cpu(vsi->info.pvid) == vlanprio) /* duplicate request, so just return success */ goto error_pvid; i40e_vlan_stripping_enable(vsi); /* Locked once because multiple functions below iterate list */ spin_lock_bh(&vsi->mac_filter_hash_lock); /* Check for condition where there was already a port VLAN ID * filter set and now it is being deleted by setting it to zero. * Additionally check for the condition where there was a port * VLAN but now there is a new and different port VLAN being set. * Before deleting all the old VLAN filters we must add new ones * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our * MAC addresses deleted. 
*/ if ((!(vlan_id || qos) || vlanprio != le16_to_cpu(vsi->info.pvid)) && vsi->info.pvid) { ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY); if (ret) { dev_info(&vsi->back->pdev->dev, "add VF VLAN failed, ret=%d aq_err=%d\n", ret, vsi->back->hw.aq.asq_last_status); spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_pvid; } } if (vsi->info.pvid) { /* remove all filters on the old VLAN */ i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) & VLAN_VID_MASK)); } spin_unlock_bh(&vsi->mac_filter_hash_lock); /* disable promisc modes in case they were enabled */ ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, allmulti, alluni); if (ret) { dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n"); goto error_pvid; } if (vlan_id || qos) ret = i40e_vsi_add_pvid(vsi, vlanprio); else i40e_vsi_remove_pvid(vsi); spin_lock_bh(&vsi->mac_filter_hash_lock); if (vlan_id) { dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan_id, qos, vf_id); /* add new VLAN filter for each MAC */ ret = i40e_add_vlan_all_mac(vsi, vlan_id); if (ret) { dev_info(&vsi->back->pdev->dev, "add VF VLAN failed, ret=%d aq_err=%d\n", ret, vsi->back->hw.aq.asq_last_status); spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_pvid; } /* remove the previously added non-VLAN MAC filters */ i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY); } spin_unlock_bh(&vsi->mac_filter_hash_lock); if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) alluni = true; if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) allmulti = true; /* Schedule the worker thread to take care of applying changes */ i40e_service_event_schedule(vsi->back); if (ret) { dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n"); goto error_pvid; } /* The Port VLAN needs to be saved across resets the same as the * default LAN MAC address. */ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); i40e_vc_reset_vf(vf, true); /* During reset the VF got a new VSI, so refresh a pointer. 
*/ vsi = pf->vsi[vf->lan_vsi_idx]; ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni); if (ret) { dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n"); goto error_pvid; } ret = 0; error_pvid: clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } /** * i40e_ndo_set_vf_bw * @netdev: network interface device structure * @vf_id: VF identifier * @min_tx_rate: Minimum Tx rate * @max_tx_rate: Maximum Tx rate * * configure VF Tx rate **/ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_vsi *vsi; struct i40e_vf *vf; int ret = 0; if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); return -EAGAIN; } /* validate the request */ ret = i40e_validate_vf(pf, vf_id); if (ret) goto error; if (min_tx_rate) { dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", min_tx_rate, vf_id); ret = -EINVAL; goto error; } vf = &pf->vf[vf_id]; if (!i40e_check_vf_init_timeout(vf)) { ret = -EAGAIN; goto error; } vsi = pf->vsi[vf->lan_vsi_idx]; ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (ret) goto error; vf->tx_rate = max_tx_rate; error: clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } /** * i40e_ndo_get_vf_config * @netdev: network interface device structure * @vf_id: VF identifier * @ivi: VF configuration structure * * return VF configuration **/ int i40e_ndo_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_vf *vf; int ret = 0; if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); return -EAGAIN; } /* validate the request */ ret = i40e_validate_vf(pf, vf_id); if (ret) goto error_param; vf = &pf->vf[vf_id]; /* first vsi is always the LAN vsi */ vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { ret = -ENOENT; goto error_param; } ivi->vf = vf_id; ether_addr_copy(ivi->mac, vf->default_lan_addr.addr); ivi->max_tx_rate = vf->tx_rate; ivi->min_tx_rate = 0; ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> I40E_VLAN_PRIORITY_SHIFT; if (vf->link_forced == false) ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; else if (vf->link_up == true) ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; else ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; ivi->spoofchk = vf->spoofchk; ivi->trusted = vf->trusted; ret = 0; error_param: clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } /** * i40e_ndo_set_vf_link_state * @netdev: network interface device structure * @vf_id: VF identifier * @link: required link state * * Set the link state of a specified VF, regardless of physical link state **/ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_link_status *ls = &pf->hw.phy.link_info; struct virtchnl_pf_event pfe; struct i40e_hw *hw = &pf->hw; struct i40e_vf *vf; int abs_vf_id; int ret = 0; if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); return -EAGAIN; } /* validate the 
request */ if (vf_id >= pf->num_alloc_vfs) { dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); ret = -EINVAL; goto error_out; } vf = &pf->vf[vf_id]; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; switch (link) { case IFLA_VF_LINK_STATE_AUTO: vf->link_forced = false; i40e_set_vf_link_state(vf, &pfe, ls); break; case IFLA_VF_LINK_STATE_ENABLE: vf->link_forced = true; vf->link_up = true; i40e_set_vf_link_state(vf, &pfe, ls); break; case IFLA_VF_LINK_STATE_DISABLE: vf->link_forced = true; vf->link_up = false; i40e_set_vf_link_state(vf, &pfe, ls); break; default: ret = -EINVAL; goto error_out; } /* Notify the VF of its new link state */ i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(pfe), NULL); error_out: clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } /** * i40e_ndo_set_vf_spoofchk * @netdev: network interface device structure * @vf_id: VF identifier * @enable: flag to enable or disable feature * * Enable or disable VF spoof checking **/ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_vsi_context ctxt; struct i40e_hw *hw = &pf->hw; struct i40e_vf *vf; int ret = 0; if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); return -EAGAIN; } /* validate the request */ if (vf_id >= pf->num_alloc_vfs) { dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); ret = -EINVAL; goto out; } vf = &(pf->vf[vf_id]); if (!i40e_check_vf_init_timeout(vf)) { ret = -EAGAIN; goto out; } if (enable == vf->spoofchk) goto out; vf->spoofchk = enable; memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid; ctxt.pf_num = pf->hw.pf_id; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); if (enable) ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", ret); ret = -EIO; } out: clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } /** * i40e_ndo_set_vf_trust * @netdev: network interface device structure of the pf * @vf_id: VF identifier * @setting: trust setting * * Enable or disable VF trust setting **/ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_vf *vf; int ret = 0; if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); return -EAGAIN; } /* validate the request */ if (vf_id >= pf->num_alloc_vfs) { dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); ret = -EINVAL; goto out; } if (pf->flags & I40E_FLAG_MFP_ENABLED) { dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n"); ret = -EINVAL; goto out; } vf = &pf->vf[vf_id]; if (setting == vf->trusted) goto out; vf->trusted = setting; /* request PF to sync mac/vlan filters for the VF */ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED; i40e_vc_reset_vf(vf, true); dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", vf_id, setting ? 
"" : "un"); if (vf->adq_enabled) { if (!vf->trusted) { dev_info(&pf->pdev->dev, "VF %u no longer Trusted, deleting all cloud filters\n", vf_id); i40e_del_all_cloud_filters(vf); } } out: clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } /** * i40e_get_vf_stats - populate some stats for the VF * @netdev: the netdev of the PF * @vf_id: the host OS identifier (0-127) * @vf_stats: pointer to the OS memory to be initialized */ int i40e_get_vf_stats(struct net_device *netdev, int vf_id, struct ifla_vf_stats *vf_stats) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_eth_stats *stats; struct i40e_vsi *vsi; struct i40e_vf *vf; /* validate the request */ if (i40e_validate_vf(pf, vf_id)) return -EINVAL; vf = &pf->vf[vf_id]; if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id); return -EBUSY; } vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) return -EINVAL; i40e_update_eth_stats(vsi); stats = &vsi->eth_stats; memset(vf_stats, 0, sizeof(*vf_stats)); vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast + stats->rx_multicast; vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast + stats->tx_multicast; vf_stats->rx_bytes = stats->rx_bytes; vf_stats->tx_bytes = stats->tx_bytes; vf_stats->broadcast = stats->rx_broadcast; vf_stats->multicast = stats->rx_multicast; vf_stats->rx_dropped = stats->rx_discards; vf_stats->tx_dropped = stats->tx_discards; return 0; }
linux-master
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
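The i40e_vc_process_vflr_event() handler in the file above locates a VF's reset bit by splitting the absolute VF number into a register index (each GLGEN_VFLRSTAT register carries 32 VF bits) and a bit index within that register. The standalone C sketch below is not driver code; it only mirrors that arithmetic, and the base/VF numbers are invented for illustration.

/* Userspace sketch of the VFLR bitmap arithmetic used above. */
#include <stdio.h>

static void vflr_locate(unsigned int vf_base_id, unsigned int vf_id,
			unsigned int *reg_idx, unsigned int *bit_idx)
{
	unsigned int abs_vf = vf_base_id + vf_id;

	*reg_idx = abs_vf / 32;	/* which 32-bit status register */
	*bit_idx = abs_vf % 32;	/* which bit inside that register */
}

int main(void)
{
	unsigned int reg_idx, bit_idx;

	vflr_locate(64, 5, &reg_idx, &bit_idx);	/* hypothetical base id and VF id */
	printf("reg %u, bit %u\n", reg_idx, bit_idx);	/* prints: reg 2, bit 5 */
	return 0;
}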
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e.h" #include "i40e_osdep.h" #include "i40e_register.h" #include "i40e_type.h" #include "i40e_hmc.h" #include "i40e_lan_hmc.h" #include "i40e_prototype.h" /* lan specific interface functions */ /** * i40e_align_l2obj_base - aligns base object pointer to 512 bytes * @offset: base address offset needing alignment * * Aligns the layer 2 function private memory so it's 512-byte aligned. **/ static u64 i40e_align_l2obj_base(u64 offset) { u64 aligned_offset = offset; if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0) aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT - (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT)); return aligned_offset; } /** * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size * @txq_num: number of Tx queues needing backing context * @rxq_num: number of Rx queues needing backing context * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context * @fcoe_filt_num: number of FCoE filters needing backing context * * Calculates the maximum amount of memory for the function required, based * on the number of resources it must provide context for. **/ static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num, u32 fcoe_cntx_num, u32 fcoe_filt_num) { u64 fpm_size = 0; fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ; fpm_size = i40e_align_l2obj_base(fpm_size); fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ); fpm_size = i40e_align_l2obj_base(fpm_size); fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX); fpm_size = i40e_align_l2obj_base(fpm_size); fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT); fpm_size = i40e_align_l2obj_base(fpm_size); return fpm_size; } /** * i40e_init_lan_hmc - initialize i40e_hmc_info struct * @hw: pointer to the HW structure * @txq_num: number of Tx queues needing backing context * @rxq_num: number of Rx queues needing backing context * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context * @fcoe_filt_num: number of FCoE filters needing backing context * * This function will be called once per physical function initialization. * It will fill out the i40e_hmc_obj_info structure for LAN objects based on * the driver's provided input, as well as information from the HMC itself * loaded from NVRAM. * * Assumptions: * - HMC Resource Profile has been selected before calling this function. 
**/ int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, u32 rxq_num, u32 fcoe_cntx_num, u32 fcoe_filt_num) { struct i40e_hmc_obj_info *obj, *full_obj; int ret_code = 0; u64 l2fpm_size; u32 size_exp; hw->hmc.signature = I40E_HMC_INFO_SIGNATURE; hw->hmc.hmc_fn_id = hw->pf_id; /* allocate memory for hmc_obj */ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem, sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX); if (ret_code) goto init_lan_hmc_out; hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *) hw->hmc.hmc_obj_virt_mem.va; /* The full object will be used to create the LAN HMC SD */ full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL]; full_obj->max_cnt = 0; full_obj->cnt = 0; full_obj->base = 0; full_obj->size = 0; /* Tx queue context information */ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX]; obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX); obj->cnt = txq_num; obj->base = 0; size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ); obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (txq_num > obj->max_cnt) { ret_code = -EINVAL; hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", txq_num, obj->max_cnt, ret_code); goto init_lan_hmc_out; } /* aggregate values into the full LAN object for later */ full_obj->max_cnt += obj->max_cnt; full_obj->cnt += obj->cnt; /* Rx queue context information */ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX]; obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX); obj->cnt = rxq_num; obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base + (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt * hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size); obj->base = i40e_align_l2obj_base(obj->base); size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ); obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (rxq_num > obj->max_cnt) { ret_code = -EINVAL; hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", rxq_num, obj->max_cnt, ret_code); goto init_lan_hmc_out; } /* aggregate values into the full LAN object for later */ full_obj->max_cnt += obj->max_cnt; full_obj->cnt += obj->cnt; /* FCoE context information */ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX]; obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX); obj->cnt = fcoe_cntx_num; obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base + (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt * hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size); obj->base = i40e_align_l2obj_base(obj->base); size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ); obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (fcoe_cntx_num > obj->max_cnt) { ret_code = -EINVAL; hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", fcoe_cntx_num, obj->max_cnt, ret_code); goto init_lan_hmc_out; } /* aggregate values into the full LAN object for later */ full_obj->max_cnt += obj->max_cnt; full_obj->cnt += obj->cnt; /* FCoE filter information */ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT]; obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX); obj->cnt = fcoe_filt_num; obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base + (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt * hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size); obj->base = i40e_align_l2obj_base(obj->base); size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ); obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (fcoe_filt_num > obj->max_cnt) { ret_code = -EINVAL; hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 
0x%x but max allowed is 0x%x, returns error %d\n", fcoe_filt_num, obj->max_cnt, ret_code); goto init_lan_hmc_out; } /* aggregate values into the full LAN object for later */ full_obj->max_cnt += obj->max_cnt; full_obj->cnt += obj->cnt; hw->hmc.first_sd_index = 0; hw->hmc.sd_table.ref_cnt = 0; l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num, fcoe_filt_num); if (NULL == hw->hmc.sd_table.sd_entry) { hw->hmc.sd_table.sd_cnt = (u32) (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) / I40E_HMC_DIRECT_BP_SIZE; /* allocate the sd_entry members in the sd_table */ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr, (sizeof(struct i40e_hmc_sd_entry) * hw->hmc.sd_table.sd_cnt)); if (ret_code) goto init_lan_hmc_out; hw->hmc.sd_table.sd_entry = (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va; } /* store in the LAN full object for later */ full_obj->size = l2fpm_size; init_lan_hmc_out: return ret_code; } /** * i40e_remove_pd_page - Remove a page from the page descriptor table * @hw: pointer to the HW structure * @hmc_info: pointer to the HMC configuration information structure * @idx: segment descriptor index to find the relevant page descriptor * * This function: * 1. Marks the entry in pd table (for paged address mode) invalid * 2. write to register PMPDINV to invalidate the backing page in FV cache * 3. Decrement the ref count for pd_entry * assumptions: * 1. caller can deallocate the memory used by pd after this function * returns. **/ static int i40e_remove_pd_page(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx) { int ret_code = 0; if (!i40e_prep_remove_pd_page(hmc_info, idx)) ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true); return ret_code; } /** * i40e_remove_sd_bp - remove a backing page from a segment descriptor * @hw: pointer to our HW structure * @hmc_info: pointer to the HMC configuration information structure * @idx: the page index * * This function: * 1. Marks the entry in sd table (for direct address mode) invalid * 2. write to register PMSDCMD, PMSDDATALOW(PMSDDATALOW.PMSDVALID set * to 0) and PMSDDATAHIGH to invalidate the sd page * 3. Decrement the ref count for the sd_entry * assumptions: * 1. caller can deallocate the memory used by backing storage after this * function returns. **/ static int i40e_remove_sd_bp(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx) { int ret_code = 0; if (!i40e_prep_remove_sd_bp(hmc_info, idx)) ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true); return ret_code; } /** * i40e_create_lan_hmc_object - allocate backing store for hmc objects * @hw: pointer to the HW structure * @info: pointer to i40e_hmc_create_obj_info struct * * This will allocate memory for PDs and backing pages and populate * the sd and pd entries. 
**/ static int i40e_create_lan_hmc_object(struct i40e_hw *hw, struct i40e_hmc_lan_create_obj_info *info) { struct i40e_hmc_sd_entry *sd_entry; u32 pd_idx1 = 0, pd_lmt1 = 0; u32 pd_idx = 0, pd_lmt = 0; bool pd_error = false; u32 sd_idx, sd_lmt; int ret_code = 0; u64 sd_size; u32 i, j; if (NULL == info) { ret_code = -EINVAL; hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n"); goto exit; } if (NULL == info->hmc_info) { ret_code = -EINVAL; hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n"); goto exit; } if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) { ret_code = -EINVAL; hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n"); goto exit; } if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { ret_code = -EINVAL; hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n", ret_code); goto exit; } if ((info->start_idx + info->count) > info->hmc_info->hmc_obj[info->rsrc_type].cnt) { ret_code = -EINVAL; hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n", ret_code); goto exit; } /* find sd index and limit */ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, info->start_idx, info->count, &sd_idx, &sd_lmt); if (sd_idx >= info->hmc_info->sd_table.sd_cnt || sd_lmt > info->hmc_info->sd_table.sd_cnt) { ret_code = -EINVAL; goto exit; } /* find pd index */ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, info->start_idx, info->count, &pd_idx, &pd_lmt); /* This is to cover for cases where you may not want to have an SD with * the full 2M memory but something smaller. By not filling out any * size, the function will default the SD size to be 2M. */ if (info->direct_mode_sz == 0) sd_size = I40E_HMC_DIRECT_BP_SIZE; else sd_size = info->direct_mode_sz; /* check if all the sds are valid. If not, allocate a page and * initialize it. */ for (j = sd_idx; j < sd_lmt; j++) { /* update the sd table entry */ ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j, info->entry_type, sd_size); if (ret_code) goto exit_sd_error; sd_entry = &info->hmc_info->sd_table.sd_entry[j]; if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) { /* check if all the pds in this sd are valid. If not, * allocate a page and initialize it. 
*/ /* find pd_idx and pd_lmt in this sd */ pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT)); pd_lmt1 = min(pd_lmt, ((j + 1) * I40E_HMC_MAX_BP_COUNT)); for (i = pd_idx1; i < pd_lmt1; i++) { /* update the pd table entry */ ret_code = i40e_add_pd_table_entry(hw, info->hmc_info, i, NULL); if (ret_code) { pd_error = true; break; } } if (pd_error) { /* remove the backing pages from pd_idx1 to i */ while (i && (i > pd_idx1)) { i40e_remove_pd_bp(hw, info->hmc_info, (i - 1)); i--; } } } if (!sd_entry->valid) { sd_entry->valid = true; switch (sd_entry->entry_type) { case I40E_SD_TYPE_PAGED: I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.pd_table.pd_page_addr.pa, j, sd_entry->entry_type); break; case I40E_SD_TYPE_DIRECT: I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa, j, sd_entry->entry_type); break; default: ret_code = -EINVAL; goto exit; } } } goto exit; exit_sd_error: /* cleanup for sd entries from j to sd_idx */ while (j && (j > sd_idx)) { sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1]; switch (sd_entry->entry_type) { case I40E_SD_TYPE_PAGED: pd_idx1 = max(pd_idx, ((j - 1) * I40E_HMC_MAX_BP_COUNT)); pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT)); for (i = pd_idx1; i < pd_lmt1; i++) i40e_remove_pd_bp(hw, info->hmc_info, i); i40e_remove_pd_page(hw, info->hmc_info, (j - 1)); break; case I40E_SD_TYPE_DIRECT: i40e_remove_sd_bp(hw, info->hmc_info, (j - 1)); break; default: ret_code = -EINVAL; break; } j--; } exit: return ret_code; } /** * i40e_configure_lan_hmc - prepare the HMC backing store * @hw: pointer to the hw structure * @model: the model for the layout of the SD/PD tables * * - This function will be called once per physical function initialization. * - This function will be called after i40e_init_lan_hmc() and before * any LAN/FCoE HMC objects can be created. 
**/ int i40e_configure_lan_hmc(struct i40e_hw *hw, enum i40e_hmc_model model) { struct i40e_hmc_lan_create_obj_info info; u8 hmc_fn_id = hw->hmc.hmc_fn_id; struct i40e_hmc_obj_info *obj; int ret_code = 0; /* Initialize part of the create object info struct */ info.hmc_info = &hw->hmc; info.rsrc_type = I40E_HMC_LAN_FULL; info.start_idx = 0; info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size; /* Build the SD entry for the LAN objects */ switch (model) { case I40E_HMC_MODEL_DIRECT_PREFERRED: case I40E_HMC_MODEL_DIRECT_ONLY: info.entry_type = I40E_SD_TYPE_DIRECT; /* Make one big object, a single SD */ info.count = 1; ret_code = i40e_create_lan_hmc_object(hw, &info); if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED)) goto try_type_paged; else if (ret_code) goto configure_lan_hmc_out; /* else clause falls through the break */ break; case I40E_HMC_MODEL_PAGED_ONLY: try_type_paged: info.entry_type = I40E_SD_TYPE_PAGED; /* Make one big object in the PD table */ info.count = 1; ret_code = i40e_create_lan_hmc_object(hw, &info); if (ret_code) goto configure_lan_hmc_out; break; default: /* unsupported type */ ret_code = -EINVAL; hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n", ret_code); goto configure_lan_hmc_out; } /* Configure and program the FPM registers so objects can be created */ /* Tx contexts */ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX]; wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id), (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512)); wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt); /* Rx contexts */ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX]; wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id), (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512)); wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt); /* FCoE contexts */ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX]; wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id), (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512)); wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt); /* FCoE filters */ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT]; wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id), (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512)); wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt); configure_lan_hmc_out: return ret_code; } /** * i40e_delete_lan_hmc_object - remove hmc objects * @hw: pointer to the HW structure * @info: pointer to i40e_hmc_delete_obj_info struct * * This will de-populate the SDs and PDs. It frees * the memory for PDS and backing storage. After this function is returned, * caller should deallocate memory allocated previously for * book-keeping information about PDs and backing storage. 
**/ static int i40e_delete_lan_hmc_object(struct i40e_hw *hw, struct i40e_hmc_lan_delete_obj_info *info) { struct i40e_hmc_pd_table *pd_table; u32 pd_idx, pd_lmt, rel_pd_idx; u32 sd_idx, sd_lmt; int ret_code = 0; u32 i, j; if (NULL == info) { ret_code = -EINVAL; hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n"); goto exit; } if (NULL == info->hmc_info) { ret_code = -EINVAL; hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n"); goto exit; } if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) { ret_code = -EINVAL; hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n"); goto exit; } if (NULL == info->hmc_info->sd_table.sd_entry) { ret_code = -EINVAL; hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n"); goto exit; } if (NULL == info->hmc_info->hmc_obj) { ret_code = -EINVAL; hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n"); goto exit; } if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { ret_code = -EINVAL; hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n", ret_code); goto exit; } if ((info->start_idx + info->count) > info->hmc_info->hmc_obj[info->rsrc_type].cnt) { ret_code = -EINVAL; hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n", ret_code); goto exit; } I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, info->start_idx, info->count, &pd_idx, &pd_lmt); for (j = pd_idx; j < pd_lmt; j++) { sd_idx = j / I40E_HMC_PD_CNT_IN_SD; if (I40E_SD_TYPE_PAGED != info->hmc_info->sd_table.sd_entry[sd_idx].entry_type) continue; rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD; pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; if (pd_table->pd_entry[rel_pd_idx].valid) { ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j); if (ret_code) goto exit; } } /* find sd index and limit */ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, info->start_idx, info->count, &sd_idx, &sd_lmt); if (sd_idx >= info->hmc_info->sd_table.sd_cnt || sd_lmt > info->hmc_info->sd_table.sd_cnt) { ret_code = -EINVAL; goto exit; } for (i = sd_idx; i < sd_lmt; i++) { if (!info->hmc_info->sd_table.sd_entry[i].valid) continue; switch (info->hmc_info->sd_table.sd_entry[i].entry_type) { case I40E_SD_TYPE_DIRECT: ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i); if (ret_code) goto exit; break; case I40E_SD_TYPE_PAGED: ret_code = i40e_remove_pd_page(hw, info->hmc_info, i); if (ret_code) goto exit; break; default: break; } } exit: return ret_code; } /** * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory * @hw: pointer to the hw structure * * This must be called by drivers as they are shutting down and being * removed from the OS. 
**/ int i40e_shutdown_lan_hmc(struct i40e_hw *hw) { struct i40e_hmc_lan_delete_obj_info info; int ret_code; info.hmc_info = &hw->hmc; info.rsrc_type = I40E_HMC_LAN_FULL; info.start_idx = 0; info.count = 1; /* delete the object */ ret_code = i40e_delete_lan_hmc_object(hw, &info); /* free the SD table entry for LAN */ i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr); hw->hmc.sd_table.sd_cnt = 0; hw->hmc.sd_table.sd_entry = NULL; /* free memory used for hmc_obj */ i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem); hw->hmc.hmc_obj = NULL; return ret_code; } #define I40E_HMC_STORE(_struct, _ele) \ offsetof(struct _struct, _ele), \ sizeof_field(struct _struct, _ele) struct i40e_context_ele { u16 offset; u16 size_of; u16 width; u16 lsb; }; /* LAN Tx Queue Context */ static struct i40e_context_ele i40e_hmc_txq_ce_info[] = { /* Field Width LSB */ {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 }, /* line 1 */ {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 }, {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 }, /* line 7 */ {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) }, {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) }, {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) }, { 0 } }; /* LAN Rx Queue Context */ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = { /* Field Width LSB */ { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 }, { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 }, { 0 } }; /** * i40e_write_byte - replace HMC context byte * @hmc_bits: pointer to the HMC memory * @ce_info: a description of the struct to be read from * @src: the struct to be read from **/ static void i40e_write_byte(u8 *hmc_bits, struct i40e_context_ele *ce_info, u8 *src) { u8 src_byte, dest_byte, mask; u8 *from, *dest; u16 shift_width; /* 
copy from the next struct field */ from = src + ce_info->offset; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; mask = (u8)(BIT(ce_info->width) - 1); src_byte = *from; src_byte &= mask; /* shift to correct alignment */ mask <<= shift_width; src_byte <<= shift_width; /* get the current bits from the target bit string */ dest = hmc_bits + (ce_info->lsb / 8); memcpy(&dest_byte, dest, sizeof(dest_byte)); dest_byte &= ~mask; /* get the bits not changing */ dest_byte |= src_byte; /* add in the new bits */ /* put it all back */ memcpy(dest, &dest_byte, sizeof(dest_byte)); } /** * i40e_write_word - replace HMC context word * @hmc_bits: pointer to the HMC memory * @ce_info: a description of the struct to be read from * @src: the struct to be read from **/ static void i40e_write_word(u8 *hmc_bits, struct i40e_context_ele *ce_info, u8 *src) { u16 src_word, mask; u8 *from, *dest; u16 shift_width; __le16 dest_word; /* copy from the next struct field */ from = src + ce_info->offset; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; mask = BIT(ce_info->width) - 1; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines */ src_word = *(u16 *)from; src_word &= mask; /* shift to correct alignment */ mask <<= shift_width; src_word <<= shift_width; /* get the current bits from the target bit string */ dest = hmc_bits + (ce_info->lsb / 8); memcpy(&dest_word, dest, sizeof(dest_word)); dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ dest_word |= cpu_to_le16(src_word); /* add in the new bits */ /* put it all back */ memcpy(dest, &dest_word, sizeof(dest_word)); } /** * i40e_write_dword - replace HMC context dword * @hmc_bits: pointer to the HMC memory * @ce_info: a description of the struct to be read from * @src: the struct to be read from **/ static void i40e_write_dword(u8 *hmc_bits, struct i40e_context_ele *ce_info, u8 *src) { u32 src_dword, mask; u8 *from, *dest; u16 shift_width; __le32 dest_dword; /* copy from the next struct field */ from = src + ce_info->offset; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; /* if the field width is exactly 32 on an x86 machine, then the shift * operation will not work because the SHL instructions count is masked * to 5 bits so the shift will do nothing */ if (ce_info->width < 32) mask = BIT(ce_info->width) - 1; else mask = ~(u32)0; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines */ src_dword = *(u32 *)from; src_dword &= mask; /* shift to correct alignment */ mask <<= shift_width; src_dword <<= shift_width; /* get the current bits from the target bit string */ dest = hmc_bits + (ce_info->lsb / 8); memcpy(&dest_dword, dest, sizeof(dest_dword)); dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ /* put it all back */ memcpy(dest, &dest_dword, sizeof(dest_dword)); } /** * i40e_write_qword - replace HMC context qword * @hmc_bits: pointer to the HMC memory * @ce_info: a description of the struct to be read from * @src: the struct to be read from **/ static void i40e_write_qword(u8 *hmc_bits, struct i40e_context_ele *ce_info, u8 *src) { u64 src_qword, mask; u8 *from, *dest; u16 shift_width; __le64 dest_qword; /* copy from the next struct field */ from = src + ce_info->offset; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; /* if the field width is exactly 64 on an 
x86 machine, then the shift * operation will not work because the SHL instructions count is masked * to 6 bits so the shift will do nothing */ if (ce_info->width < 64) mask = BIT_ULL(ce_info->width) - 1; else mask = ~(u64)0; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines */ src_qword = *(u64 *)from; src_qword &= mask; /* shift to correct alignment */ mask <<= shift_width; src_qword <<= shift_width; /* get the current bits from the target bit string */ dest = hmc_bits + (ce_info->lsb / 8); memcpy(&dest_qword, dest, sizeof(dest_qword)); dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ /* put it all back */ memcpy(dest, &dest_qword, sizeof(dest_qword)); } /** * i40e_clear_hmc_context - zero out the HMC context bits * @hw: the hardware struct * @context_bytes: pointer to the context bit array (DMA memory) * @hmc_type: the type of HMC resource **/ static int i40e_clear_hmc_context(struct i40e_hw *hw, u8 *context_bytes, enum i40e_hmc_lan_rsrc_type hmc_type) { /* clean the bit array */ memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size); return 0; } /** * i40e_set_hmc_context - replace HMC context bits * @context_bytes: pointer to the context bit array * @ce_info: a description of the struct to be filled * @dest: the struct to be filled **/ static int i40e_set_hmc_context(u8 *context_bytes, struct i40e_context_ele *ce_info, u8 *dest) { int f; for (f = 0; ce_info[f].width != 0; f++) { /* we have to deal with each element of the HMC using the * correct size so that we are correct regardless of the * endianness of the machine */ switch (ce_info[f].size_of) { case 1: i40e_write_byte(context_bytes, &ce_info[f], dest); break; case 2: i40e_write_word(context_bytes, &ce_info[f], dest); break; case 4: i40e_write_dword(context_bytes, &ce_info[f], dest); break; case 8: i40e_write_qword(context_bytes, &ce_info[f], dest); break; } } return 0; } /** * i40e_hmc_get_object_va - retrieves an object's virtual address * @hw: the hardware struct, from which we obtain the i40e_hmc_info pointer * @object_base: pointer to u64 to get the va * @rsrc_type: the hmc resource type * @obj_idx: hmc object index * * This function retrieves the object's virtual address from the object * base pointer. This function is used for LAN Queue contexts. 
**/ static int i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base, enum i40e_hmc_lan_rsrc_type rsrc_type, u32 obj_idx) { struct i40e_hmc_info *hmc_info = &hw->hmc; u32 obj_offset_in_sd, obj_offset_in_pd; struct i40e_hmc_sd_entry *sd_entry; struct i40e_hmc_pd_entry *pd_entry; u32 pd_idx, pd_lmt, rel_pd_idx; u64 obj_offset_in_fpm; u32 sd_idx, sd_lmt; int ret_code = 0; if (NULL == hmc_info) { ret_code = -EINVAL; hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n"); goto exit; } if (NULL == hmc_info->hmc_obj) { ret_code = -EINVAL; hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n"); goto exit; } if (NULL == object_base) { ret_code = -EINVAL; hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n"); goto exit; } if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) { ret_code = -EINVAL; hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n"); goto exit; } if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) { hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n", ret_code); ret_code = -EINVAL; goto exit; } /* find sd index and limit */ I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1, &sd_idx, &sd_lmt); sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base + hmc_info->hmc_obj[rsrc_type].size * obj_idx; if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) { I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1, &pd_idx, &pd_lmt); rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD; pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx]; obj_offset_in_pd = (u32)(obj_offset_in_fpm % I40E_HMC_PAGED_BP_SIZE); *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd; } else { obj_offset_in_sd = (u32)(obj_offset_in_fpm % I40E_HMC_DIRECT_BP_SIZE); *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd; } exit: return ret_code; } /** * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue * @hw: the hardware struct * @queue: the queue we care about **/ int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, u16 queue) { u8 *context_bytes; int err; err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); if (err < 0) return err; return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX); } /** * i40e_set_lan_tx_queue_context - set the HMC context for the queue * @hw: the hardware struct * @queue: the queue we care about * @s: the struct to be filled **/ int i40e_set_lan_tx_queue_context(struct i40e_hw *hw, u16 queue, struct i40e_hmc_obj_txq *s) { u8 *context_bytes; int err; err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); if (err < 0) return err; return i40e_set_hmc_context(context_bytes, i40e_hmc_txq_ce_info, (u8 *)s); } /** * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue * @hw: the hardware struct * @queue: the queue we care about **/ int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, u16 queue) { u8 *context_bytes; int err; err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); if (err < 0) return err; return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX); } /** * i40e_set_lan_rx_queue_context - set the HMC context for the queue * @hw: the hardware struct * @queue: the queue we care about * @s: the struct to be filled **/ int i40e_set_lan_rx_queue_context(struct i40e_hw *hw, u16 queue, struct i40e_hmc_obj_rxq *s) { u8 *context_bytes; int err; err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); if (err < 0) return err; return i40e_set_hmc_context(context_bytes, 
i40e_hmc_rxq_ce_info, (u8 *)s); }
linux-master
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
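The HMC context writers above (i40e_write_byte/word/dword/qword) pack each field described by a (width, LSB) entry of the ce_info tables into a little-endian bit array: mask off the field, shift it to LSB modulo 8, and merge it into the target bytes without disturbing neighbouring bits. The following minimal userspace sketch shows only the byte-wide case; the field geometry and value are made up, and fields that span more than one byte use the wider writers in the real driver.

/* Userspace sketch of the byte-wide HMC context packing shown above. */
#include <stdio.h>
#include <stdint.h>

static void pack_byte_field(uint8_t *ctx, unsigned int width,
			    unsigned int lsb, uint8_t val)
{
	uint8_t mask = (uint8_t)((1u << width) - 1);
	unsigned int shift = lsb % 8;
	uint8_t *dest = ctx + lsb / 8;

	val &= mask;				/* keep only 'width' bits */
	*dest &= (uint8_t)~(mask << shift);	/* clear the target bit range */
	*dest |= (uint8_t)(val << shift);	/* merge in the new value */
}

int main(void)
{
	uint8_t ctx[4] = { 0 };

	pack_byte_field(ctx, 3, 13, 0x5);	/* 3-bit field whose LSB is bit 13 */
	printf("%02x %02x %02x %02x\n",
	       ctx[0], ctx[1], ctx[2], ctx[3]);	/* prints: 00 a0 00 00 */
	return 0;
}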
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e.h" #include "i40e_osdep.h" #include "i40e_register.h" #include "i40e_alloc.h" #include "i40e_hmc.h" #include "i40e_type.h" /** * i40e_add_sd_table_entry - Adds a segment descriptor to the table * @hw: pointer to our hw struct * @hmc_info: pointer to the HMC configuration information struct * @sd_index: segment descriptor index to manipulate * @type: what type of segment descriptor we're manipulating * @direct_mode_sz: size to alloc in direct mode **/ int i40e_add_sd_table_entry(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 sd_index, enum i40e_sd_entry_type type, u64 direct_mode_sz) { enum i40e_memory_type mem_type __attribute__((unused)); struct i40e_hmc_sd_entry *sd_entry; bool dma_mem_alloc_done = false; struct i40e_dma_mem mem; int ret_code = 0; u64 alloc_len; if (NULL == hmc_info->sd_table.sd_entry) { ret_code = -EINVAL; hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n"); goto exit; } if (sd_index >= hmc_info->sd_table.sd_cnt) { ret_code = -EINVAL; hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n"); goto exit; } sd_entry = &hmc_info->sd_table.sd_entry[sd_index]; if (!sd_entry->valid) { if (I40E_SD_TYPE_PAGED == type) { mem_type = i40e_mem_pd; alloc_len = I40E_HMC_PAGED_BP_SIZE; } else { mem_type = i40e_mem_bp_jumbo; alloc_len = direct_mode_sz; } /* allocate a 4K pd page or 2M backing page */ ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len, I40E_HMC_PD_BP_BUF_ALIGNMENT); if (ret_code) goto exit; dma_mem_alloc_done = true; if (I40E_SD_TYPE_PAGED == type) { ret_code = i40e_allocate_virt_mem(hw, &sd_entry->u.pd_table.pd_entry_virt_mem, sizeof(struct i40e_hmc_pd_entry) * 512); if (ret_code) goto exit; sd_entry->u.pd_table.pd_entry = (struct i40e_hmc_pd_entry *) sd_entry->u.pd_table.pd_entry_virt_mem.va; sd_entry->u.pd_table.pd_page_addr = mem; } else { sd_entry->u.bp.addr = mem; sd_entry->u.bp.sd_pd_index = sd_index; } /* initialize the sd entry */ hmc_info->sd_table.sd_entry[sd_index].entry_type = type; /* increment the ref count */ I40E_INC_SD_REFCNT(&hmc_info->sd_table); } /* Increment backing page reference count */ if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type) I40E_INC_BP_REFCNT(&sd_entry->u.bp); exit: if (ret_code) if (dma_mem_alloc_done) i40e_free_dma_mem(hw, &mem); return ret_code; } /** * i40e_add_pd_table_entry - Adds page descriptor to the specified table * @hw: pointer to our HW structure * @hmc_info: pointer to the HMC configuration information structure * @pd_index: which page descriptor index to manipulate * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one. * * This function: * 1. Initializes the pd entry * 2. Adds pd_entry in the pd_table * 3. Mark the entry valid in i40e_hmc_pd_entry structure * 4. Initializes the pd_entry's ref count to 1 * assumptions: * 1. The memory for pd should be pinned down, physically contiguous and * aligned on 4K boundary and zeroed memory. * 2. It should be 4K in size. 
**/ int i40e_add_pd_table_entry(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 pd_index, struct i40e_dma_mem *rsrc_pg) { struct i40e_hmc_pd_table *pd_table; struct i40e_hmc_pd_entry *pd_entry; struct i40e_dma_mem mem; struct i40e_dma_mem *page = &mem; u32 sd_idx, rel_pd_idx; int ret_code = 0; u64 page_desc; u64 *pd_addr; if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) { ret_code = -EINVAL; hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n"); goto exit; } /* find corresponding sd */ sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD); if (I40E_SD_TYPE_PAGED != hmc_info->sd_table.sd_entry[sd_idx].entry_type) goto exit; rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD); pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; pd_entry = &pd_table->pd_entry[rel_pd_idx]; if (!pd_entry->valid) { if (rsrc_pg) { pd_entry->rsrc_pg = true; page = rsrc_pg; } else { /* allocate a 4K backing page */ ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp, I40E_HMC_PAGED_BP_SIZE, I40E_HMC_PD_BP_BUF_ALIGNMENT); if (ret_code) goto exit; pd_entry->rsrc_pg = false; } pd_entry->bp.addr = *page; pd_entry->bp.sd_pd_index = pd_index; pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED; /* Set page address and valid bit */ page_desc = page->pa | 0x1; pd_addr = (u64 *)pd_table->pd_page_addr.va; pd_addr += rel_pd_idx; /* Add the backing page physical address in the pd entry */ memcpy(pd_addr, &page_desc, sizeof(u64)); pd_entry->sd_index = sd_idx; pd_entry->valid = true; I40E_INC_PD_REFCNT(pd_table); } I40E_INC_BP_REFCNT(&pd_entry->bp); exit: return ret_code; } /** * i40e_remove_pd_bp - remove a backing page from a page descriptor * @hw: pointer to our HW structure * @hmc_info: pointer to the HMC configuration information structure * @idx: the page index * * This function: * 1. Marks the entry in pd tabe (for paged address mode) or in sd table * (for direct address mode) invalid. * 2. Write to register PMPDINV to invalidate the backing page in FV cache * 3. Decrement the ref count for the pd _entry * assumptions: * 1. Caller can deallocate the memory used by backing storage after this * function returns. 
**/ int i40e_remove_pd_bp(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx) { struct i40e_hmc_pd_entry *pd_entry; struct i40e_hmc_pd_table *pd_table; struct i40e_hmc_sd_entry *sd_entry; u32 sd_idx, rel_pd_idx; int ret_code = 0; u64 *pd_addr; /* calculate index */ sd_idx = idx / I40E_HMC_PD_CNT_IN_SD; rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD; if (sd_idx >= hmc_info->sd_table.sd_cnt) { ret_code = -EINVAL; hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n"); goto exit; } sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) { ret_code = -EINVAL; hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n"); goto exit; } /* get the entry and decrease its ref counter */ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; pd_entry = &pd_table->pd_entry[rel_pd_idx]; I40E_DEC_BP_REFCNT(&pd_entry->bp); if (pd_entry->bp.ref_cnt) goto exit; /* mark the entry invalid */ pd_entry->valid = false; I40E_DEC_PD_REFCNT(pd_table); pd_addr = (u64 *)pd_table->pd_page_addr.va; pd_addr += rel_pd_idx; memset(pd_addr, 0, sizeof(u64)); I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx); /* free memory here */ if (!pd_entry->rsrc_pg) ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr); if (ret_code) goto exit; if (!pd_table->ref_cnt) i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem); exit: return ret_code; } /** * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry * @hmc_info: pointer to the HMC configuration information structure * @idx: the page index **/ int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, u32 idx) { struct i40e_hmc_sd_entry *sd_entry; int ret_code = 0; /* get the entry and decrease its ref counter */ sd_entry = &hmc_info->sd_table.sd_entry[idx]; I40E_DEC_BP_REFCNT(&sd_entry->u.bp); if (sd_entry->u.bp.ref_cnt) { ret_code = -EBUSY; goto exit; } I40E_DEC_SD_REFCNT(&hmc_info->sd_table); /* mark the entry invalid */ sd_entry->valid = false; exit: return ret_code; } /** * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor * @hw: pointer to our hw struct * @hmc_info: pointer to the HMC configuration information structure * @idx: the page index * @is_pf: used to distinguish between VF and PF **/ int i40e_remove_sd_bp_new(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx, bool is_pf) { struct i40e_hmc_sd_entry *sd_entry; if (!is_pf) return -EOPNOTSUPP; /* get the entry and decrease its ref counter */ sd_entry = &hmc_info->sd_table.sd_entry[idx]; I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT); return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr); } /** * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry. * @hmc_info: pointer to the HMC configuration information structure * @idx: segment descriptor index to find the relevant page descriptor **/ int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, u32 idx) { struct i40e_hmc_sd_entry *sd_entry; int ret_code = 0; sd_entry = &hmc_info->sd_table.sd_entry[idx]; if (sd_entry->u.pd_table.ref_cnt) { ret_code = -EBUSY; goto exit; } /* mark the entry invalid */ sd_entry->valid = false; I40E_DEC_SD_REFCNT(&hmc_info->sd_table); exit: return ret_code; } /** * i40e_remove_pd_page_new - Removes a PD page from sd entry. 
* @hw: pointer to our hw struct * @hmc_info: pointer to the HMC configuration information structure * @idx: segment descriptor index to find the relevant page descriptor * @is_pf: used to distinguish between VF and PF **/ int i40e_remove_pd_page_new(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx, bool is_pf) { struct i40e_hmc_sd_entry *sd_entry; if (!is_pf) return -EOPNOTSUPP; sd_entry = &hmc_info->sd_table.sd_entry[idx]; I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED); return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr); }
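/* Illustrative sketch, not part of the i40e driver: the add/remove helpers
 * above all follow one lifecycle: a 4 KiB backing page is allocated the
 * first time a page descriptor entry is used, reference counted while it is
 * shared, and released only when the last user removes it. The user-space
 * model below (hypothetical names pd_entry_model, pd_add, pd_remove) shows
 * just that refcount logic, with plain calloc()/free() standing in for
 * i40e_allocate_dma_mem()/i40e_free_dma_mem().
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MODEL_BP_SIZE 4096 /* stand-in for I40E_HMC_PAGED_BP_SIZE */

struct pd_entry_model {
	void *backing_page;   /* models pd_entry->bp.addr */
	unsigned int ref_cnt; /* models I40E_INC/DEC_BP_REFCNT() */
	bool valid;
};

/* First caller allocates the backing page; later callers only take a ref. */
static int pd_add(struct pd_entry_model *e)
{
	if (!e->valid) {
		e->backing_page = calloc(1, MODEL_BP_SIZE);
		if (!e->backing_page)
			return -1;
		e->valid = true;
	}
	e->ref_cnt++;
	return 0;
}

/* Drop one reference; the page is freed only when the count reaches zero,
 * mirroring the early return on pd_entry->bp.ref_cnt in i40e_remove_pd_bp().
 */
static void pd_remove(struct pd_entry_model *e)
{
	if (!e->valid || !e->ref_cnt)
		return;
	if (--e->ref_cnt)
		return;
	e->valid = false;
	free(e->backing_page);
	e->backing_page = NULL;
}

int main(void)
{
	struct pd_entry_model e = { 0 };

	pd_add(&e);    /* allocates the page, ref_cnt = 1 */
	pd_add(&e);    /* page reused,        ref_cnt = 2 */
	pd_remove(&e); /* ref_cnt = 1, page kept          */
	pd_remove(&e); /* ref_cnt = 0, page freed         */
	printf("valid=%d ref_cnt=%u\n", e.valid, e.ref_cnt);
	return 0;
}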
linux-master
drivers/net/ethernet/intel/i40e/i40e_hmc.c
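/* Illustrative sketch, not part of the driver sources above: the HMC code in
 * i40e_hmc.c splits a flat page-descriptor index into a segment descriptor
 * index plus an offset within that segment using one division and one modulo.
 * The constant below assumes I40E_HMC_PD_CNT_IN_SD is 512 (a 2 MB segment
 * holds 512 pages of 4 KiB); treat that value as an assumption here.
 */
#include <stdio.h>

#define PD_CNT_IN_SD 512u /* assumed value of I40E_HMC_PD_CNT_IN_SD */

int main(void)
{
	unsigned int pd_index = 1300;
	unsigned int sd_idx = pd_index / PD_CNT_IN_SD;     /* which segment descriptor */
	unsigned int rel_pd_idx = pd_index % PD_CNT_IN_SD; /* slot within that segment */

	/* pd_index 1300 lands in segment 2, slot 276 */
	printf("sd_idx=%u rel_pd_idx=%u\n", sd_idx, rel_pd_idx);
	return 0;
}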
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ /* ethtool support for i40e */ #include "i40e.h" #include "i40e_diag.h" #include "i40e_txrx_common.h" /* ethtool statistics helpers */ /** * struct i40e_stats - definition for an ethtool statistic * @stat_string: statistic name to display in ethtool -S output * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) * @stat_offset: offsetof() the stat from a base pointer * * This structure defines a statistic to be added to the ethtool stats buffer. * It defines a statistic as offset from a common base pointer. Stats should * be defined in constant arrays using the I40E_STAT macro, with every element * of the array using the same _type for calculating the sizeof_stat and * stat_offset. * * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from * the i40e_add_ethtool_stat() helper function. * * The @stat_string is interpreted as a format string, allowing formatted * values to be inserted while looping over multiple structures for a given * statistics array. Thus, every statistic string in an array should have the * same type and number of format specifiers, to be formatted by variadic * arguments to the i40e_add_stat_string() helper function. **/ struct i40e_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int stat_offset; }; /* Helper macro to define an i40e_stat structure with proper size and type. * Use this when defining constant statistics arrays. Note that @_type expects * only a type name and is used multiple times. */ #define I40E_STAT(_type, _name, _stat) { \ .stat_string = _name, \ .sizeof_stat = sizeof_field(_type, _stat), \ .stat_offset = offsetof(_type, _stat) \ } /* Helper macro for defining some statistics directly copied from the netdev * stats structure. */ #define I40E_NETDEV_STAT(_net_stat) \ I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) /* Helper macro for defining some statistics related to queues */ #define I40E_QUEUE_STAT(_name, _stat) \ I40E_STAT(struct i40e_ring, _name, _stat) /* Stats associated with a Tx or Rx ring */ static const struct i40e_stats i40e_gstrings_queue_stats[] = { I40E_QUEUE_STAT("%s-%u.packets", stats.packets), I40E_QUEUE_STAT("%s-%u.bytes", stats.bytes), }; /** * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer * @data: location to store the stat value * @pointer: basis for where to copy from * @stat: the stat definition * * Copies the stat data defined by the pointer and stat structure pair into * the memory supplied as data. Used to implement i40e_add_ethtool_stats and * i40e_add_queue_stats. If the pointer is null, data will be zero'd. */ static void i40e_add_one_ethtool_stat(u64 *data, void *pointer, const struct i40e_stats *stat) { char *p; if (!pointer) { /* ensure that the ethtool data buffer is zero'd for any stats * which don't have a valid pointer. 
*/ *data = 0; return; } p = (char *)pointer + stat->stat_offset; switch (stat->sizeof_stat) { case sizeof(u64): *data = *((u64 *)p); break; case sizeof(u32): *data = *((u32 *)p); break; case sizeof(u16): *data = *((u16 *)p); break; case sizeof(u8): *data = *((u8 *)p); break; default: WARN_ONCE(1, "unexpected stat size for %s", stat->stat_string); *data = 0; } } /** * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer * @data: ethtool stats buffer * @pointer: location to copy stats from * @stats: array of stats to copy * @size: the size of the stats definition * * Copy the stats defined by the stats array using the pointer as a base into * the data buffer supplied by ethtool. Updates the data pointer to point to * the next empty location for successive calls to __i40e_add_ethtool_stats. * If pointer is null, set the data values to zero and update the pointer to * skip these stats. **/ static void __i40e_add_ethtool_stats(u64 **data, void *pointer, const struct i40e_stats stats[], const unsigned int size) { unsigned int i; for (i = 0; i < size; i++) i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]); } /** * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer * @data: ethtool stats buffer * @pointer: location where stats are stored * @stats: static const array of stat definitions * * Macro to ease the use of __i40e_add_ethtool_stats by taking a static * constant stats array and passing the ARRAY_SIZE(). This avoids typos by * ensuring that we pass the size associated with the given stats array. * * The parameter @stats is evaluated twice, so parameters with side effects * should be avoided. **/ #define i40e_add_ethtool_stats(data, pointer, stats) \ __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) /** * i40e_add_queue_stats - copy queue statistics into supplied buffer * @data: ethtool stats buffer * @ring: the ring to copy * * Queue statistics must be copied while protected by * u64_stats_fetch_begin, so we can't directly use i40e_add_ethtool_stats. * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the * ring pointer is null, zero out the queue stat values and update the data * pointer. Otherwise safely copy the stats from the ring into the supplied * buffer and update the data pointer when finished. * * This function expects to be called while under rcu_read_lock(). **/ static void i40e_add_queue_stats(u64 **data, struct i40e_ring *ring) { const unsigned int size = ARRAY_SIZE(i40e_gstrings_queue_stats); const struct i40e_stats *stats = i40e_gstrings_queue_stats; unsigned int start; unsigned int i; /* To avoid invalid statistics values, ensure that we keep retrying * the copy until we get a consistent value according to * u64_stats_fetch_retry. But first, make sure our ring is * non-null before attempting to access its syncp. */ do { start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp); for (i = 0; i < size; i++) { i40e_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]); } } while (ring && u64_stats_fetch_retry(&ring->syncp, start)); /* Once we successfully copy the stats in, update the data pointer */ *data += size; } /** * __i40e_add_stat_strings - copy stat strings into ethtool buffer * @p: ethtool supplied buffer * @stats: stat definitions array * @size: size of the stats array * * Format and copy the strings described by stats into the buffer pointed at * by p. **/ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], const unsigned int size, ...) 
{ unsigned int i; for (i = 0; i < size; i++) { va_list args; va_start(args, size); vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); *p += ETH_GSTRING_LEN; va_end(args); } } /** * i40e_add_stat_strings - copy stat strings into ethtool buffer * @p: ethtool supplied buffer * @stats: stat definitions array * * Format and copy the strings described by the const static stats value into * the buffer pointed at by p. * * The parameter @stats is evaluated twice, so parameters with side effects * should be avoided. Additionally, stats must be an array such that * ARRAY_SIZE can be called on it. **/ #define i40e_add_stat_strings(p, stats, ...) \ __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) #define I40E_PF_STAT(_name, _stat) \ I40E_STAT(struct i40e_pf, _name, _stat) #define I40E_VSI_STAT(_name, _stat) \ I40E_STAT(struct i40e_vsi, _name, _stat) #define I40E_VEB_STAT(_name, _stat) \ I40E_STAT(struct i40e_veb, _name, _stat) #define I40E_VEB_TC_STAT(_name, _stat) \ I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat) #define I40E_PFC_STAT(_name, _stat) \ I40E_STAT(struct i40e_pfc_stats, _name, _stat) static const struct i40e_stats i40e_gstrings_net_stats[] = { I40E_NETDEV_STAT(rx_packets), I40E_NETDEV_STAT(tx_packets), I40E_NETDEV_STAT(rx_bytes), I40E_NETDEV_STAT(tx_bytes), I40E_NETDEV_STAT(rx_errors), I40E_NETDEV_STAT(tx_errors), I40E_NETDEV_STAT(rx_dropped), I40E_NETDEV_STAT(tx_dropped), I40E_NETDEV_STAT(collisions), I40E_NETDEV_STAT(rx_length_errors), I40E_NETDEV_STAT(rx_crc_errors), }; static const struct i40e_stats i40e_gstrings_veb_stats[] = { I40E_VEB_STAT("veb.rx_bytes", stats.rx_bytes), I40E_VEB_STAT("veb.tx_bytes", stats.tx_bytes), I40E_VEB_STAT("veb.rx_unicast", stats.rx_unicast), I40E_VEB_STAT("veb.tx_unicast", stats.tx_unicast), I40E_VEB_STAT("veb.rx_multicast", stats.rx_multicast), I40E_VEB_STAT("veb.tx_multicast", stats.tx_multicast), I40E_VEB_STAT("veb.rx_broadcast", stats.rx_broadcast), I40E_VEB_STAT("veb.tx_broadcast", stats.tx_broadcast), I40E_VEB_STAT("veb.rx_discards", stats.rx_discards), I40E_VEB_STAT("veb.tx_discards", stats.tx_discards), I40E_VEB_STAT("veb.tx_errors", stats.tx_errors), I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol), }; struct i40e_cp_veb_tc_stats { u64 tc_rx_packets; u64 tc_rx_bytes; u64 tc_tx_packets; u64 tc_tx_bytes; }; static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = { I40E_VEB_TC_STAT("veb.tc_%u_tx_packets", tc_tx_packets), I40E_VEB_TC_STAT("veb.tc_%u_tx_bytes", tc_tx_bytes), I40E_VEB_TC_STAT("veb.tc_%u_rx_packets", tc_rx_packets), I40E_VEB_TC_STAT("veb.tc_%u_rx_bytes", tc_rx_bytes), }; static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast), I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast), I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast), I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast), I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast), I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast), I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), I40E_VSI_STAT("tx_linearize", tx_linearize), I40E_VSI_STAT("tx_force_wb", tx_force_wb), I40E_VSI_STAT("tx_busy", tx_busy), I40E_VSI_STAT("tx_stopped", tx_stopped), I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed), I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse), I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc), I40E_VSI_STAT("rx_cache_waive", rx_page_waive), I40E_VSI_STAT("rx_cache_busy", rx_page_busy), 
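/* tx_restart counts how often a Tx queue was woken back up after being
 * stopped because its descriptor ring had filled. */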
I40E_VSI_STAT("tx_restart", tx_restart), }; /* These PF_STATs might look like duplicates of some NETDEV_STATs, * but they are separate. This device supports Virtualization, and * as such might have several netdevs supporting VMDq and FCoE going * through a single port. The NETDEV_STATs are for individual netdevs * seen at the top of the stack, and the PF_STATs are for the physical * function at the bottom of the stack hosting those netdevs. * * The PF_STATs are appended to the netdev stats only when ethtool -S * is queried on the base PF netdev, not on the VMDq or FCoE netdev. */ static const struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("port.rx_bytes", stats.eth.rx_bytes), I40E_PF_STAT("port.tx_bytes", stats.eth.tx_bytes), I40E_PF_STAT("port.rx_unicast", stats.eth.rx_unicast), I40E_PF_STAT("port.tx_unicast", stats.eth.tx_unicast), I40E_PF_STAT("port.rx_multicast", stats.eth.rx_multicast), I40E_PF_STAT("port.tx_multicast", stats.eth.tx_multicast), I40E_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast), I40E_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast), I40E_PF_STAT("port.tx_errors", stats.eth.tx_errors), I40E_PF_STAT("port.rx_dropped", stats.eth.rx_discards), I40E_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down), I40E_PF_STAT("port.rx_crc_errors", stats.crc_errors), I40E_PF_STAT("port.illegal_bytes", stats.illegal_bytes), I40E_PF_STAT("port.mac_local_faults", stats.mac_local_faults), I40E_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults), I40E_PF_STAT("port.tx_timeout", tx_timeout_count), I40E_PF_STAT("port.rx_csum_bad", hw_csum_rx_error), I40E_PF_STAT("port.rx_length_errors", stats.rx_length_errors), I40E_PF_STAT("port.link_xon_rx", stats.link_xon_rx), I40E_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx), I40E_PF_STAT("port.link_xon_tx", stats.link_xon_tx), I40E_PF_STAT("port.link_xoff_tx", stats.link_xoff_tx), I40E_PF_STAT("port.rx_size_64", stats.rx_size_64), I40E_PF_STAT("port.rx_size_127", stats.rx_size_127), I40E_PF_STAT("port.rx_size_255", stats.rx_size_255), I40E_PF_STAT("port.rx_size_511", stats.rx_size_511), I40E_PF_STAT("port.rx_size_1023", stats.rx_size_1023), I40E_PF_STAT("port.rx_size_1522", stats.rx_size_1522), I40E_PF_STAT("port.rx_size_big", stats.rx_size_big), I40E_PF_STAT("port.tx_size_64", stats.tx_size_64), I40E_PF_STAT("port.tx_size_127", stats.tx_size_127), I40E_PF_STAT("port.tx_size_255", stats.tx_size_255), I40E_PF_STAT("port.tx_size_511", stats.tx_size_511), I40E_PF_STAT("port.tx_size_1023", stats.tx_size_1023), I40E_PF_STAT("port.tx_size_1522", stats.tx_size_1522), I40E_PF_STAT("port.tx_size_big", stats.tx_size_big), I40E_PF_STAT("port.rx_undersize", stats.rx_undersize), I40E_PF_STAT("port.rx_fragments", stats.rx_fragments), I40E_PF_STAT("port.rx_oversize", stats.rx_oversize), I40E_PF_STAT("port.rx_jabber", stats.rx_jabber), I40E_PF_STAT("port.VF_admin_queue_requests", vf_aq_requests), I40E_PF_STAT("port.arq_overflows", arq_overflows), I40E_PF_STAT("port.tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), I40E_PF_STAT("port.rx_hwtstamp_cleared", rx_hwtstamp_cleared), I40E_PF_STAT("port.tx_hwtstamp_skipped", tx_hwtstamp_skipped), I40E_PF_STAT("port.fdir_flush_cnt", fd_flush_cnt), I40E_PF_STAT("port.fdir_atr_match", stats.fd_atr_match), I40E_PF_STAT("port.fdir_atr_tunnel_match", stats.fd_atr_tunnel_match), I40E_PF_STAT("port.fdir_atr_status", stats.fd_atr_status), I40E_PF_STAT("port.fdir_sb_match", stats.fd_sb_match), I40E_PF_STAT("port.fdir_sb_status", stats.fd_sb_status), /* LPI stats */ 
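/* LPI = Low Power Idle; these counters reflect Energy Efficient Ethernet
 * activity reported by the MAC. */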
I40E_PF_STAT("port.tx_lpi_status", stats.tx_lpi_status), I40E_PF_STAT("port.rx_lpi_status", stats.rx_lpi_status), I40E_PF_STAT("port.tx_lpi_count", stats.tx_lpi_count), I40E_PF_STAT("port.rx_lpi_count", stats.rx_lpi_count), }; struct i40e_pfc_stats { u64 priority_xon_rx; u64 priority_xoff_rx; u64 priority_xon_tx; u64 priority_xoff_tx; u64 priority_xon_2_xoff; }; static const struct i40e_stats i40e_gstrings_pfc_stats[] = { I40E_PFC_STAT("port.tx_priority_%u_xon_tx", priority_xon_tx), I40E_PFC_STAT("port.tx_priority_%u_xoff_tx", priority_xoff_tx), I40E_PFC_STAT("port.rx_priority_%u_xon_rx", priority_xon_rx), I40E_PFC_STAT("port.rx_priority_%u_xoff_rx", priority_xoff_rx), I40E_PFC_STAT("port.rx_priority_%u_xon_2_xoff", priority_xon_2_xoff), }; #define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats) #define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats) #define I40E_VSI_STATS_LEN (I40E_NETDEV_STATS_LEN + I40E_MISC_STATS_LEN) #define I40E_PFC_STATS_LEN (ARRAY_SIZE(i40e_gstrings_pfc_stats) * \ I40E_MAX_USER_PRIORITY) #define I40E_VEB_STATS_LEN (ARRAY_SIZE(i40e_gstrings_veb_stats) + \ (ARRAY_SIZE(i40e_gstrings_veb_tc_stats) * \ I40E_MAX_TRAFFIC_CLASS)) #define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) #define I40E_PF_STATS_LEN (I40E_GLOBAL_STATS_LEN + \ I40E_PFC_STATS_LEN + \ I40E_VEB_STATS_LEN + \ I40E_VSI_STATS_LEN) /* Length of stats for a single queue */ #define I40E_QUEUE_STATS_LEN ARRAY_SIZE(i40e_gstrings_queue_stats) enum i40e_ethtool_test_id { I40E_ETH_TEST_REG = 0, I40E_ETH_TEST_EEPROM, I40E_ETH_TEST_INTR, I40E_ETH_TEST_LINK, }; static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Eeprom test (offline)", "Interrupt test (offline)", "Link test (on/offline)" }; #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN) struct i40e_priv_flags { char flag_string[ETH_GSTRING_LEN]; u64 flag; bool read_only; }; #define I40E_PRIV_FLAG(_name, _flag, _read_only) { \ .flag_string = _name, \ .flag = _flag, \ .read_only = _read_only, \ } static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { /* NOTE: MFP setting cannot be changed */ I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1), I40E_PRIV_FLAG("total-port-shutdown", I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED, 1), I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0), I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0), I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0), I40E_PRIV_FLAG("link-down-on-close", I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED, 0), I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), I40E_PRIV_FLAG("disable-source-pruning", I40E_FLAG_SOURCE_PRUNING_DISABLED, 0), I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0), I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0), I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0), I40E_PRIV_FLAG("vf-vlan-pruning", I40E_FLAG_VF_VLAN_PRUNING, 0), }; #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags) /* Private flags with a global effect, restricted to PF 0 */ static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = { I40E_PRIV_FLAG("vf-true-promisc-support", I40E_FLAG_TRUE_PROMISC_SUPPORT, 0), }; #define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags) /** * i40e_partition_setting_complaint - generic complaint for MFP restriction * @pf: the PF struct **/ static void i40e_partition_setting_complaint(struct i40e_pf *pf) { dev_info(&pf->pdev->dev, "The link 
settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n"); } /** * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes * @pf: PF struct with phy_types * @ks: ethtool link ksettings struct to fill out * **/ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, struct ethtool_link_ksettings *ks) { struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info; u64 phy_types = pf->hw.phy.phy_types; ethtool_link_ksettings_zero_link_mode(ks, supported); ethtool_link_ksettings_zero_link_mode(ks, advertising); if (phy_types & I40E_CAP_PHY_TYPE_SGMII) { ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseT_Full); if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) { ethtool_link_ksettings_add_link_mode(ks, supported, 100baseT_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 100baseT_Full); } } if (phy_types & I40E_CAP_PHY_TYPE_XAUI || phy_types & I40E_CAP_PHY_TYPE_XFI || phy_types & I40E_CAP_PHY_TYPE_SFI || phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU || phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseT_Full); } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_T) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseT_Full); } if (phy_types & I40E_CAP_PHY_TYPE_2_5GBASE_T) { ethtool_link_ksettings_add_link_mode(ks, supported, 2500baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 2500baseT_Full); } if (phy_types & I40E_CAP_PHY_TYPE_5GBASE_T) { ethtool_link_ksettings_add_link_mode(ks, supported, 5000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 5000baseT_Full); } if (phy_types & I40E_CAP_PHY_TYPE_XLAUI || phy_types & I40E_CAP_PHY_TYPE_XLPPI || phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC) ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseCR4_Full); if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU || phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) { ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseCR4_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseCR4_Full); } if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) { ethtool_link_ksettings_add_link_mode(ks, supported, 100baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) ethtool_link_ksettings_add_link_mode(ks, advertising, 100baseT_Full); } if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T) { ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseT_Full); } if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) { ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseSR4_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseSR4_Full); } if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) { ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseLR4_Full); 
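/* Like 40GBASE-SR4 above, LR4 is advertised whenever it is supported; the
 * optical 40G modes have no requested_speeds gate here. */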
ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseLR4_Full); } if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) { ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseKR4_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseKR4_Full); } if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) { ethtool_link_ksettings_add_link_mode(ks, supported, 20000baseKR2_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 20000baseKR2_Full); } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseKX4_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseKX4_Full); } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR && !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseKR_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseKR_Full); } if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX && !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) { ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseKX_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseKX_Full); } /* need to add 25G PHY types */ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR) { ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseKR_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseKR_Full); } if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR) { ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseCR_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); } if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR || phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR) { ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseSR_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseSR_Full); } if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC || phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) { ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseCR_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); } if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR || phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR || phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR || phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR || phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC || phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) { ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) { ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_BASER); } } /* need to add new 10G PHY types */ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 || phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseCR_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 
10000baseCR_Full); } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseSR_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseSR_Full); } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseLR_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseLR_Full); } if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX || phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) { ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseX_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseX_Full); } /* Autoneg PHY types */ if (phy_types & I40E_CAP_PHY_TYPE_SGMII || phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4 || phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU || phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4 || phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR || phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR || phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR || phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR || phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2 || phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR || phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR || phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4 || phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR || phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU || phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 || phy_types & I40E_CAP_PHY_TYPE_10GBASE_T || phy_types & I40E_CAP_PHY_TYPE_5GBASE_T || phy_types & I40E_CAP_PHY_TYPE_2_5GBASE_T || phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL || phy_types & I40E_CAP_PHY_TYPE_1000BASE_T || phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX || phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX || phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) { ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); } } /** * i40e_get_settings_link_up_fec - Get the FEC mode encoding from mask * @req_fec_info: mask request FEC info * @ks: ethtool ksettings to fill in **/ static void i40e_get_settings_link_up_fec(u8 req_fec_info, struct ethtool_link_ksettings *ks) { ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); if ((I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) && (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info)) { ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_BASER); ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); } else if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) { ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); } else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) { ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_BASER); } else { ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); } } /** * i40e_get_settings_link_up - Get the Link settings for when link is up * @hw: hw structure * @ks: ethtool ksettings to fill in * @netdev: network interface device structure * @pf: pointer to physical function struct **/ static void i40e_get_settings_link_up(struct i40e_hw *hw, struct ethtool_link_ksettings *ks, struct net_device *netdev, struct i40e_pf *pf) { struct i40e_link_status 
*hw_link_info = &hw->phy.link_info; struct ethtool_link_ksettings cap_ksettings; u32 link_speed = hw_link_info->link_speed; /* Initialize supported and advertised settings based on phy settings */ switch (hw_link_info->phy_type) { case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_40GBASE_CR4_CU: ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseCR4_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseCR4_Full); break; case I40E_PHY_TYPE_XLAUI: case I40E_PHY_TYPE_XLPPI: case I40E_PHY_TYPE_40GBASE_AOC: ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseCR4_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseCR4_Full); break; case I40E_PHY_TYPE_40GBASE_SR4: ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseSR4_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseSR4_Full); break; case I40E_PHY_TYPE_40GBASE_LR4: ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseLR4_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseLR4_Full); break; case I40E_PHY_TYPE_25GBASE_SR: case I40E_PHY_TYPE_25GBASE_LR: case I40E_PHY_TYPE_10GBASE_SR: case I40E_PHY_TYPE_10GBASE_LR: case I40E_PHY_TYPE_1000BASE_SX: case I40E_PHY_TYPE_1000BASE_LX: ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseSR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseSR_Full); i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseSR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseSR_Full); ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseLR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseLR_Full); ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseX_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseX_Full); ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full); if (hw_link_info->module_type[2] & I40E_MODULE_TYPE_1000BASE_SX || hw_link_info->module_type[2] & I40E_MODULE_TYPE_1000BASE_LX) { ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ethtool_link_ksettings_add_link_mode( ks, advertising, 1000baseT_Full); } if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseT_Full); break; case I40E_PHY_TYPE_10GBASE_T: case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS: case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS: case I40E_PHY_TYPE_1000BASE_T: case I40E_PHY_TYPE_100BASE_TX: ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full); ethtool_link_ksettings_add_link_mode(ks, supported, 5000baseT_Full); ethtool_link_ksettings_add_link_mode(ks, supported, 2500baseT_Full); ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseT_Full); ethtool_link_ksettings_add_link_mode(ks, supported, 100baseT_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB) ethtool_link_ksettings_add_link_mode(ks, 
advertising, 5000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 2500baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) ethtool_link_ksettings_add_link_mode(ks, advertising, 100baseT_Full); break; case I40E_PHY_TYPE_1000BASE_T_OPTICAL: ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseT_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseT_Full); break; case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_10GBASE_CR1: ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseT_Full); break; case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: case I40E_PHY_TYPE_SFI: case I40E_PHY_TYPE_10GBASE_SFPP_CU: case I40E_PHY_TYPE_10GBASE_AOC: ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseT_Full); i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); break; case I40E_PHY_TYPE_SGMII: ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseT_Full); if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) { ethtool_link_ksettings_add_link_mode(ks, supported, 100baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) ethtool_link_ksettings_add_link_mode( ks, advertising, 100baseT_Full); } break; case I40E_PHY_TYPE_40GBASE_KR4: case I40E_PHY_TYPE_25GBASE_KR: case I40E_PHY_TYPE_20GBASE_KR2: case I40E_PHY_TYPE_10GBASE_KR: case I40E_PHY_TYPE_10GBASE_KX4: case I40E_PHY_TYPE_1000BASE_KX: ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseKR4_Full); ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseKR_Full); ethtool_link_ksettings_add_link_mode(ks, supported, 20000baseKR2_Full); ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseKR_Full); ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseKX4_Full); ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseKX_Full); ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseKR4_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseKR_Full); i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); ethtool_link_ksettings_add_link_mode(ks, advertising, 20000baseKR2_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseKR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseKX4_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseKX_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); break; case I40E_PHY_TYPE_25GBASE_CR: ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseCR_Full); 
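/* For 25GBASE-CR the 25G link mode is always advertised; the FEC
 * advertisement is derived from req_fec_info by
 * i40e_get_settings_link_up_fec() just below. */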
ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); break; case I40E_PHY_TYPE_25GBASE_AOC: case I40E_PHY_TYPE_25GBASE_ACC: ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseCR_Full); break; default: /* if we got here and link is up something bad is afoot */ netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized, or incorrect cable is in use\n", hw_link_info->phy_type); } /* Now that we've worked out everything that could be supported by the * current PHY type, get what is supported by the NVM and intersect * them to get what is truly supported */ memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings)); i40e_phy_type_to_ethtool(pf, &cap_ksettings); ethtool_intersect_link_masks(ks, &cap_ksettings); /* Set speed and duplex */ switch (link_speed) { case I40E_LINK_SPEED_40GB: ks->base.speed = SPEED_40000; break; case I40E_LINK_SPEED_25GB: ks->base.speed = SPEED_25000; break; case I40E_LINK_SPEED_20GB: ks->base.speed = SPEED_20000; break; case I40E_LINK_SPEED_10GB: ks->base.speed = SPEED_10000; break; case I40E_LINK_SPEED_5GB: ks->base.speed = SPEED_5000; break; case I40E_LINK_SPEED_2_5GB: ks->base.speed = SPEED_2500; break; case I40E_LINK_SPEED_1GB: ks->base.speed = SPEED_1000; break; case I40E_LINK_SPEED_100MB: ks->base.speed = SPEED_100; break; default: ks->base.speed = SPEED_UNKNOWN; break; } ks->base.duplex = DUPLEX_FULL; } /** * i40e_get_settings_link_down - Get the Link settings for when link is down * @hw: hw structure * @ks: ethtool ksettings to fill in * @pf: pointer to physical function struct * * Reports link settings that can be determined when link is down **/ static void i40e_get_settings_link_down(struct i40e_hw *hw, struct ethtool_link_ksettings *ks, struct i40e_pf *pf) { /* link is down and the driver needs to fall back on * supported phy types to figure out what info to display */ i40e_phy_type_to_ethtool(pf, ks); /* With no link speed and duplex are unknown */ ks->base.speed = SPEED_UNKNOWN; ks->base.duplex = DUPLEX_UNKNOWN; } /** * i40e_get_link_ksettings - Get Link Speed and Duplex settings * @netdev: network interface device structure * @ks: ethtool ksettings * * Reports speed/duplex settings based on media_type **/ static int i40e_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *ks) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; ethtool_link_ksettings_zero_link_mode(ks, supported); ethtool_link_ksettings_zero_link_mode(ks, advertising); if (link_up) i40e_get_settings_link_up(hw, ks, netdev, pf); else i40e_get_settings_link_down(hw, ks, pf); /* Now set the settings that don't rely on link being up/down */ /* Set autoneg settings */ ks->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); /* Set media type settings */ switch (hw->phy.media_type) { case I40E_MEDIA_TYPE_BACKPLANE: ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ethtool_link_ksettings_add_link_mode(ks, supported, Backplane); ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); ethtool_link_ksettings_add_link_mode(ks, advertising, Backplane); ks->base.port = PORT_NONE; break; case I40E_MEDIA_TYPE_BASET: ethtool_link_ksettings_add_link_mode(ks, supported, TP); ethtool_link_ksettings_add_link_mode(ks, advertising, TP); ks->base.port = PORT_TP; break; case I40E_MEDIA_TYPE_DA: case I40E_MEDIA_TYPE_CX4: ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); ks->base.port = PORT_DA; break; case I40E_MEDIA_TYPE_FIBER: ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); ks->base.port = PORT_FIBRE; break; case I40E_MEDIA_TYPE_UNKNOWN: default: ks->base.port = PORT_OTHER; break; } /* Set flow control settings */ ethtool_link_ksettings_add_link_mode(ks, supported, Pause); ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause); switch (hw->fc.requested_mode) { case I40E_FC_FULL: ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); break; case I40E_FC_TX_PAUSE: ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); break; case I40E_FC_RX_PAUSE: ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); break; default: ethtool_link_ksettings_del_link_mode(ks, advertising, Pause); ethtool_link_ksettings_del_link_mode(ks, advertising, Asym_Pause); break; } return 0; } #define I40E_LBIT_SIZE 8 /** * i40e_speed_to_link_speed - Translate decimal speed to i40e_aq_link_speed * @speed: speed in decimal * @ks: ethtool ksettings * * Return i40e_aq_link_speed based on speed **/ static enum i40e_aq_link_speed i40e_speed_to_link_speed(__u32 speed, const struct ethtool_link_ksettings *ks) { enum i40e_aq_link_speed link_speed = I40E_LINK_SPEED_UNKNOWN; bool speed_changed = false; int i, j; static const struct { __u32 speed; enum i40e_aq_link_speed link_speed; __u8 bit[I40E_LBIT_SIZE]; } i40e_speed_lut[] = { #define I40E_LBIT(mode) ETHTOOL_LINK_MODE_ ## mode ##_Full_BIT {SPEED_100, I40E_LINK_SPEED_100MB, {I40E_LBIT(100baseT)} }, {SPEED_1000, I40E_LINK_SPEED_1GB, {I40E_LBIT(1000baseT), I40E_LBIT(1000baseX), I40E_LBIT(1000baseKX)} }, {SPEED_10000, I40E_LINK_SPEED_10GB, {I40E_LBIT(10000baseT), I40E_LBIT(10000baseKR), I40E_LBIT(10000baseLR), I40E_LBIT(10000baseCR), I40E_LBIT(10000baseSR), I40E_LBIT(10000baseKX4)} }, {SPEED_25000, I40E_LINK_SPEED_25GB, {I40E_LBIT(25000baseCR), I40E_LBIT(25000baseKR), I40E_LBIT(25000baseSR)} }, {SPEED_40000, I40E_LINK_SPEED_40GB, {I40E_LBIT(40000baseKR4), I40E_LBIT(40000baseCR4), I40E_LBIT(40000baseSR4), I40E_LBIT(40000baseLR4)} }, {SPEED_20000, I40E_LINK_SPEED_20GB, {I40E_LBIT(20000baseKR2)} }, {SPEED_2500, I40E_LINK_SPEED_2_5GB, {I40E_LBIT(2500baseT)} }, {SPEED_5000, I40E_LINK_SPEED_5GB, {I40E_LBIT(2500baseT)} } #undef I40E_LBIT }; for (i = 0; i < ARRAY_SIZE(i40e_speed_lut); i++) { if (i40e_speed_lut[i].speed == speed) { for (j = 0; j < I40E_LBIT_SIZE; j++) { if (test_bit(i40e_speed_lut[i].bit[j], ks->link_modes.supported)) { speed_changed = true; break; } if (!i40e_speed_lut[i].bit[j]) break; } if (speed_changed) { link_speed = i40e_speed_lut[i].link_speed; break; } } } return link_speed; } #undef I40E_LBIT_SIZE /** * 
i40e_set_link_ksettings - Set Speed and Duplex * @netdev: network interface device structure * @ks: ethtool ksettings * * Set speed/duplex per media_types advertised/forced **/ static int i40e_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *ks) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_aq_get_phy_abilities_resp abilities; struct ethtool_link_ksettings safe_ks; struct ethtool_link_ksettings copy_ks; struct i40e_aq_set_phy_config config; struct i40e_pf *pf = np->vsi->back; enum i40e_aq_link_speed link_speed; struct i40e_vsi *vsi = np->vsi; struct i40e_hw *hw = &pf->hw; bool autoneg_changed = false; int timeout = 50; int status = 0; int err = 0; __u32 speed; u8 autoneg; /* Changing port settings is not supported if this isn't the * port's controlling PF */ if (hw->partition_id != 1) { i40e_partition_setting_complaint(pf); return -EOPNOTSUPP; } if (vsi != pf->vsi[pf->lan_vsi]) return -EOPNOTSUPP; if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && hw->phy.media_type != I40E_MEDIA_TYPE_FIBER && hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE && hw->phy.media_type != I40E_MEDIA_TYPE_DA && hw->phy.link_info.link_info & I40E_AQ_LINK_UP) return -EOPNOTSUPP; if (hw->device_id == I40E_DEV_ID_KX_B || hw->device_id == I40E_DEV_ID_KX_C || hw->device_id == I40E_DEV_ID_20G_KR2 || hw->device_id == I40E_DEV_ID_20G_KR2_A || hw->device_id == I40E_DEV_ID_25G_B || hw->device_id == I40E_DEV_ID_KX_X722) { netdev_info(netdev, "Changing settings is not supported on backplane.\n"); return -EOPNOTSUPP; } /* copy the ksettings to copy_ks to avoid modifying the origin */ memcpy(&copy_ks, ks, sizeof(struct ethtool_link_ksettings)); /* save autoneg out of ksettings */ autoneg = copy_ks.base.autoneg; speed = copy_ks.base.speed; /* get our own copy of the bits to check against */ memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings)); safe_ks.base.cmd = copy_ks.base.cmd; safe_ks.base.link_mode_masks_nwords = copy_ks.base.link_mode_masks_nwords; i40e_get_link_ksettings(netdev, &safe_ks); /* Get link modes supported by hardware and check against modes * requested by the user. Return an error if unsupported mode was set. */ if (!bitmap_subset(copy_ks.link_modes.advertising, safe_ks.link_modes.supported, __ETHTOOL_LINK_MODE_MASK_NBITS)) return -EINVAL; /* set autoneg back to what it currently is */ copy_ks.base.autoneg = safe_ks.base.autoneg; copy_ks.base.speed = safe_ks.base.speed; /* If copy_ks.base and safe_ks.base are not the same now, then they are * trying to set something that we do not support. 
*/ if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(struct ethtool_link_settings))) { netdev_err(netdev, "Only speed and autoneg are supported.\n"); return -EOPNOTSUPP; } while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { timeout--; if (!timeout) return -EBUSY; usleep_range(1000, 2000); } /* Get the current phy config */ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); if (status) { err = -EAGAIN; goto done; } /* Copy abilities to config in case autoneg is not * set below */ memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); config.abilities = abilities.abilities; /* Check autoneg */ if (autoneg == AUTONEG_ENABLE) { /* If autoneg was not already enabled */ if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) { /* If autoneg is not supported, return error */ if (!ethtool_link_ksettings_test_link_mode(&safe_ks, supported, Autoneg)) { netdev_info(netdev, "Autoneg not supported on this phy\n"); err = -EINVAL; goto done; } /* Autoneg is allowed to change */ config.abilities = abilities.abilities | I40E_AQ_PHY_ENABLE_AN; autoneg_changed = true; } } else { /* If autoneg is currently enabled */ if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) { /* If autoneg is supported 10GBASE_T is the only PHY * that can disable it, so otherwise return error */ if (ethtool_link_ksettings_test_link_mode(&safe_ks, supported, Autoneg) && hw->phy.media_type != I40E_MEDIA_TYPE_BASET) { netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); err = -EINVAL; goto done; } /* Autoneg is allowed to change */ config.abilities = abilities.abilities & ~I40E_AQ_PHY_ENABLE_AN; autoneg_changed = true; } } if (ethtool_link_ksettings_test_link_mode(ks, advertising, 100baseT_Full)) config.link_speed |= I40E_LINK_SPEED_100MB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, 1000baseT_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 1000baseX_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 1000baseKX_Full)) config.link_speed |= I40E_LINK_SPEED_1GB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseT_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseKX4_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseKR_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseCR_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseSR_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseLR_Full)) config.link_speed |= I40E_LINK_SPEED_10GB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, 2500baseT_Full)) config.link_speed |= I40E_LINK_SPEED_2_5GB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, 5000baseT_Full)) config.link_speed |= I40E_LINK_SPEED_5GB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, 20000baseKR2_Full)) config.link_speed |= I40E_LINK_SPEED_20GB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, 25000baseCR_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 25000baseKR_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 25000baseSR_Full)) config.link_speed |= I40E_LINK_SPEED_25GB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, 40000baseKR4_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 40000baseCR4_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 40000baseSR4_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 40000baseLR4_Full)) config.link_speed |= I40E_LINK_SPEED_40GB; /* Autonegotiation must be 
disabled to change speed */ if ((speed != SPEED_UNKNOWN && safe_ks.base.speed != speed) && (autoneg == AUTONEG_DISABLE || (safe_ks.base.autoneg == AUTONEG_DISABLE && !autoneg_changed))) { link_speed = i40e_speed_to_link_speed(speed, ks); if (link_speed == I40E_LINK_SPEED_UNKNOWN) { netdev_info(netdev, "Given speed is not supported\n"); err = -EOPNOTSUPP; goto done; } else { config.link_speed = link_speed; } } else { if (safe_ks.base.speed != speed) { netdev_info(netdev, "Unable to set speed, disable autoneg\n"); err = -EOPNOTSUPP; goto done; } } /* If speed didn't get set, set it to what it currently is. * This is needed because if advertise is 0 (as it is when autoneg * is disabled) then speed won't get set. */ if (!config.link_speed) config.link_speed = abilities.link_speed; if (autoneg_changed || abilities.link_speed != config.link_speed) { /* copy over the rest of the abilities */ config.phy_type = abilities.phy_type; config.phy_type_ext = abilities.phy_type_ext; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; config.fec_config = abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_PHY_FEC_CONFIG_MASK; /* save the requested speeds */ hw->phy.link_info.requested_speeds = config.link_speed; /* set link and auto negotiation so changes take effect */ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; /* If link is up put link down */ if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) { /* Tell the OS link is going down, the link will go * back up when fw says it is ready asynchronously */ i40e_print_link_message(vsi, false); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); } /* make the aq call */ status = i40e_aq_set_phy_config(hw, &config, NULL); if (status) { netdev_info(netdev, "Set phy config failed, err %pe aq_err %s\n", ERR_PTR(status), i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; goto done; } status = i40e_update_link_info(hw); if (status) netdev_dbg(netdev, "Updating link info failed with err %pe aq_err %s\n", ERR_PTR(status), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); } done: clear_bit(__I40E_CONFIG_BUSY, pf->state); return err; } static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; int status = 0; u32 flags = 0; int err = 0; flags = READ_ONCE(pf->flags); i40e_set_fec_in_flags(fec_cfg, &flags); /* Get the current phy config */ memset(&abilities, 0, sizeof(abilities)); status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); if (status) { err = -EAGAIN; goto done; } if (abilities.fec_cfg_curr_mod_ext_info != fec_cfg) { struct i40e_aq_set_phy_config config; memset(&config, 0, sizeof(config)); config.phy_type = abilities.phy_type; config.abilities = abilities.abilities | I40E_AQ_PHY_ENABLE_ATOMIC_LINK; config.phy_type_ext = abilities.phy_type_ext; config.link_speed = abilities.link_speed; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; config.fec_config = fec_cfg & I40E_AQ_PHY_FEC_CONFIG_MASK; status = i40e_aq_set_phy_config(hw, &config, NULL); if (status) { netdev_info(netdev, "Set phy config failed, err %pe aq_err %s\n", ERR_PTR(status), i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; goto done; } pf->flags = flags; 
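/* The new FEC flags were committed to pf->flags only after the
 * set_phy_config AQ call above succeeded. */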
status = i40e_update_link_info(hw); if (status) /* debug level message only due to relation to the link * itself rather than to the FEC settings * (e.g. no physical connection etc.) */ netdev_dbg(netdev, "Updating link info failed with err %pe aq_err %s\n", ERR_PTR(status), i40e_aq_str(hw, hw->aq.asq_last_status)); } done: return err; } static int i40e_get_fec_param(struct net_device *netdev, struct ethtool_fecparam *fecparam) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; int status = 0; int err = 0; u8 fec_cfg; /* Get the current phy config */ memset(&abilities, 0, sizeof(abilities)); status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); if (status) { err = -EAGAIN; goto done; } fecparam->fec = 0; fec_cfg = abilities.fec_cfg_curr_mod_ext_info; if (fec_cfg & I40E_AQ_SET_FEC_AUTO) fecparam->fec |= ETHTOOL_FEC_AUTO; else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_RS | I40E_AQ_SET_FEC_ABILITY_RS)) fecparam->fec |= ETHTOOL_FEC_RS; else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_KR | I40E_AQ_SET_FEC_ABILITY_KR)) fecparam->fec |= ETHTOOL_FEC_BASER; if (fec_cfg == 0) fecparam->fec |= ETHTOOL_FEC_OFF; if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) fecparam->active_fec = ETHTOOL_FEC_BASER; else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) fecparam->active_fec = ETHTOOL_FEC_RS; else fecparam->active_fec = ETHTOOL_FEC_OFF; done: return err; } static int i40e_set_fec_param(struct net_device *netdev, struct ethtool_fecparam *fecparam) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; u8 fec_cfg = 0; if (hw->device_id != I40E_DEV_ID_25G_SFP28 && hw->device_id != I40E_DEV_ID_25G_B && hw->device_id != I40E_DEV_ID_KX_X722) return -EPERM; if (hw->mac.type == I40E_MAC_X722 && !(hw->flags & I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE)) { netdev_err(netdev, "Setting FEC encoding not supported by firmware. 
Please update the NVM image.\n"); return -EOPNOTSUPP; } switch (fecparam->fec) { case ETHTOOL_FEC_AUTO: fec_cfg = I40E_AQ_SET_FEC_AUTO; break; case ETHTOOL_FEC_RS: fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS | I40E_AQ_SET_FEC_ABILITY_RS); break; case ETHTOOL_FEC_BASER: fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR | I40E_AQ_SET_FEC_ABILITY_KR); break; case ETHTOOL_FEC_OFF: case ETHTOOL_FEC_NONE: fec_cfg = 0; break; default: dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d", fecparam->fec); return -EINVAL; } return i40e_set_fec_cfg(netdev, fec_cfg); } static int i40e_nway_reset(struct net_device *netdev) { /* restart autonegotiation */ struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; int ret = 0; ret = i40e_aq_set_link_restart_an(hw, link_up, NULL); if (ret) { netdev_info(netdev, "link restart failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return -EIO; } return 0; } /** * i40e_get_pauseparam - Get Flow Control status * @netdev: netdevice structure * @pause: buffer to return pause parameters * * Return tx/rx-pause status **/ static void i40e_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; pause->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? AUTONEG_ENABLE : AUTONEG_DISABLE); /* PFC enabled so report LFC as off */ if (dcbx_cfg->pfc.pfcenable) { pause->rx_pause = 0; pause->tx_pause = 0; return; } if (hw->fc.current_mode == I40E_FC_RX_PAUSE) { pause->rx_pause = 1; } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) { pause->tx_pause = 1; } else if (hw->fc.current_mode == I40E_FC_FULL) { pause->rx_pause = 1; pause->tx_pause = 1; } } /** * i40e_set_pauseparam - Set Flow Control parameter * @netdev: network interface device structure * @pause: return tx/rx flow control status **/ static int i40e_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_vsi *vsi = np->vsi; struct i40e_hw *hw = &pf->hw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; u8 aq_failures; int err = 0; int status; u32 is_an; /* Changing the port's flow control is not supported if this isn't the * port's controlling PF */ if (hw->partition_id != 1) { i40e_partition_setting_complaint(pf); return -EOPNOTSUPP; } if (vsi != pf->vsi[pf->lan_vsi]) return -EOPNOTSUPP; is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED; if (pause->autoneg != is_an) { netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); return -EOPNOTSUPP; } /* If we have link and don't have autoneg */ if (!test_bit(__I40E_DOWN, pf->state) && !is_an) { /* Send message that it might not necessarily work*/ netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n"); } if (dcbx_cfg->pfc.pfcenable) { netdev_info(netdev, "Priority flow control enabled. 
Cannot set link flow control.\n"); return -EOPNOTSUPP; } if (pause->rx_pause && pause->tx_pause) hw->fc.requested_mode = I40E_FC_FULL; else if (pause->rx_pause && !pause->tx_pause) hw->fc.requested_mode = I40E_FC_RX_PAUSE; else if (!pause->rx_pause && pause->tx_pause) hw->fc.requested_mode = I40E_FC_TX_PAUSE; else if (!pause->rx_pause && !pause->tx_pause) hw->fc.requested_mode = I40E_FC_NONE; else return -EINVAL; /* Tell the OS link is going down, the link will go back up when fw * says it is ready asynchronously */ i40e_print_link_message(vsi, false); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); /* Set the fc mode and only restart an if link is up*/ status = i40e_set_fc(hw, &aq_failures, link_up); if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) { netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %pe aq_err %s\n", ERR_PTR(status), i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; } if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) { netdev_info(netdev, "Set fc failed on the set_phy_config call with err %pe aq_err %s\n", ERR_PTR(status), i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; } if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) { netdev_info(netdev, "Set fc failed on the get_link_info call with err %pe aq_err %s\n", ERR_PTR(status), i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; } if (!test_bit(__I40E_DOWN, pf->state) && is_an) { /* Give it a little more time to try to come back */ msleep(75); if (!test_bit(__I40E_DOWN, pf->state)) return i40e_nway_reset(netdev); } return err; } static u32 i40e_get_msglevel(struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; u32 debug_mask = pf->hw.debug_mask; if (debug_mask) netdev_info(netdev, "i40e debug_mask: 0x%08X\n", debug_mask); return pf->msg_enable; } static void i40e_set_msglevel(struct net_device *netdev, u32 data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; if (I40E_DEBUG_USER & data) pf->hw.debug_mask = data; else pf->msg_enable = data; } static int i40e_get_regs_len(struct net_device *netdev) { int reg_count = 0; int i; for (i = 0; i40e_reg_list[i].offset != 0; i++) reg_count += i40e_reg_list[i].elements; return reg_count * sizeof(u32); } static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; u32 *reg_buf = p; unsigned int i, j, ri; u32 reg; /* Tell ethtool which driver-version-specific regs output we have. * * At some point, if we have ethtool doing special formatting of * this data, it will rely on this version number to know how to * interpret things. Hence, this needs to be updated if/when the * diags register table is changed. 
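	 *
	 * Note that the dump produced below is simply every i40e_reg_list[]
	 * entry expanded to its 'elements' registers in table order, so this
	 * loop and i40e_get_regs_len() must always walk the table the same
	 * way for the reported length to match what gets written here.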
*/ regs->version = 1; /* loop through the diags reg table for what to print */ ri = 0; for (i = 0; i40e_reg_list[i].offset != 0; i++) { for (j = 0; j < i40e_reg_list[i].elements; j++) { reg = i40e_reg_list[i].offset + (j * i40e_reg_list[i].stride); reg_buf[ri++] = rd32(hw, reg); } } } static int i40e_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; struct i40e_pf *pf = np->vsi->back; int ret_val = 0, len, offset; u8 *eeprom_buff; u16 i, sectors; bool last; u32 magic; #define I40E_NVM_SECTOR_SIZE 4096 if (eeprom->len == 0) return -EINVAL; /* check for NVMUpdate access method */ magic = hw->vendor_id | (hw->device_id << 16); if (eeprom->magic && eeprom->magic != magic) { struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom; int errno = 0; /* make sure it is the right magic for NVMUpdate */ if ((eeprom->magic >> 16) != hw->device_id) errno = -EINVAL; else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) errno = -EBUSY; else ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM)) dev_info(&pf->pdev->dev, "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK), cmd->offset, cmd->data_size); return errno; } /* normal ethtool get_eeprom support */ eeprom->magic = hw->vendor_id | (hw->device_id << 16); eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL); if (!eeprom_buff) return -ENOMEM; ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret_val) { dev_info(&pf->pdev->dev, "Failed Acquiring NVM resource for read err=%d status=0x%x\n", ret_val, hw->aq.asq_last_status); goto free_buff; } sectors = eeprom->len / I40E_NVM_SECTOR_SIZE; sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0; len = I40E_NVM_SECTOR_SIZE; last = false; for (i = 0; i < sectors; i++) { if (i == (sectors - 1)) { len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i); last = true; } offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i), ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len, (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i), last, NULL); if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) { dev_info(&pf->pdev->dev, "read NVM failed, invalid offset 0x%x\n", offset); break; } else if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EACCES) { dev_info(&pf->pdev->dev, "read NVM failed, access, offset 0x%x\n", offset); break; } else if (ret_val) { dev_info(&pf->pdev->dev, "read NVM failed offset %d err=%d status=0x%x\n", offset, ret_val, hw->aq.asq_last_status); break; } } i40e_release_nvm(hw); memcpy(bytes, (u8 *)eeprom_buff, eeprom->len); free_buff: kfree(eeprom_buff); return ret_val; } static int i40e_get_eeprom_len(struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; u32 val; #define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF if (hw->mac.type == I40E_MAC_X722) { val = X722_EEPROM_SCOPE_LIMIT + 1; return val; } val = (rd32(hw, I40E_GLPCI_LBARCTRL) & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK) >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT; /* register returns value in power of 2, 64Kbyte chunks. 
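	 * As a worked example: an FL_SIZE field of 3 read above would give
	 * 64KB * 2^3 = 512KB of flash, which is exactly what the
	 * (64 * 1024) * BIT(val) computation below produces.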
*/ val = (64 * 1024) * BIT(val); return val; } static int i40e_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; struct i40e_pf *pf = np->vsi->back; struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom; int ret_val = 0; int errno = 0; u32 magic; /* normal ethtool set_eeprom is not supported */ magic = hw->vendor_id | (hw->device_id << 16); if (eeprom->magic == magic) errno = -EOPNOTSUPP; /* check for NVMUpdate access method */ else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id) errno = -EINVAL; else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) errno = -EBUSY; else ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM)) dev_info(&pf->pdev->dev, "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK), cmd->offset, cmd->data_size); return errno; } static void i40e_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; strscpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver)); strscpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw), sizeof(drvinfo->fw_version)); strscpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; if (pf->hw.pf_id == 0) drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN; } static void i40e_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS; ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS; ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; ring->rx_pending = vsi->rx_rings[0]->count; ring->tx_pending = vsi->tx_rings[0]->count; ring->rx_mini_pending = 0; ring->rx_jumbo_pending = 0; } static bool i40e_active_tx_ring_index(struct i40e_vsi *vsi, u16 index) { if (i40e_enabled_xdp_vsi(vsi)) { return index < vsi->num_queue_pairs || (index >= vsi->alloc_queue_pairs && index < vsi->alloc_queue_pairs + vsi->num_queue_pairs); } return index < vsi->num_queue_pairs; } static int i40e_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct i40e_ring *tx_rings = NULL, *rx_rings = NULL; struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u32 new_rx_count, new_tx_count; u16 tx_alloc_queue_pairs; int timeout = 50; int i, err = 0; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS || ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS || ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS || ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) { netdev_info(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", ring->tx_pending, ring->rx_pending, I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS); return -EINVAL; } new_tx_count = 
ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE); new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE); /* if nothing to do return success */ if ((new_tx_count == vsi->tx_rings[0]->count) && (new_rx_count == vsi->rx_rings[0]->count)) return 0; /* If there is a AF_XDP page pool attached to any of Rx rings, * disallow changing the number of descriptors -- regardless * if the netdev is running or not. */ if (i40e_xsk_any_rx_ring_enabled(vsi)) return -EBUSY; while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { timeout--; if (!timeout) return -EBUSY; usleep_range(1000, 2000); } if (!netif_running(vsi->netdev)) { /* simple case - set for the next time the netdev is started */ for (i = 0; i < vsi->num_queue_pairs; i++) { vsi->tx_rings[i]->count = new_tx_count; vsi->rx_rings[i]->count = new_rx_count; if (i40e_enabled_xdp_vsi(vsi)) vsi->xdp_rings[i]->count = new_tx_count; } vsi->num_tx_desc = new_tx_count; vsi->num_rx_desc = new_rx_count; goto done; } /* We can't just free everything and then setup again, * because the ISRs in MSI-X mode get passed pointers * to the Tx and Rx ring structs. */ /* alloc updated Tx and XDP Tx resources */ tx_alloc_queue_pairs = vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); if (new_tx_count != vsi->tx_rings[0]->count) { netdev_info(netdev, "Changing Tx descriptor count from %d to %d.\n", vsi->tx_rings[0]->count, new_tx_count); tx_rings = kcalloc(tx_alloc_queue_pairs, sizeof(struct i40e_ring), GFP_KERNEL); if (!tx_rings) { err = -ENOMEM; goto done; } for (i = 0; i < tx_alloc_queue_pairs; i++) { if (!i40e_active_tx_ring_index(vsi, i)) continue; tx_rings[i] = *vsi->tx_rings[i]; tx_rings[i].count = new_tx_count; /* the desc and bi pointers will be reallocated in the * setup call */ tx_rings[i].desc = NULL; tx_rings[i].rx_bi = NULL; err = i40e_setup_tx_descriptors(&tx_rings[i]); if (err) { while (i) { i--; if (!i40e_active_tx_ring_index(vsi, i)) continue; i40e_free_tx_resources(&tx_rings[i]); } kfree(tx_rings); tx_rings = NULL; goto done; } } } /* alloc updated Rx resources */ if (new_rx_count != vsi->rx_rings[0]->count) { netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n", vsi->rx_rings[0]->count, new_rx_count); rx_rings = kcalloc(vsi->alloc_queue_pairs, sizeof(struct i40e_ring), GFP_KERNEL); if (!rx_rings) { err = -ENOMEM; goto free_tx; } for (i = 0; i < vsi->num_queue_pairs; i++) { u16 unused; /* clone ring and setup updated count */ rx_rings[i] = *vsi->rx_rings[i]; rx_rings[i].count = new_rx_count; /* the desc and bi pointers will be reallocated in the * setup call */ rx_rings[i].desc = NULL; rx_rings[i].rx_bi = NULL; /* Clear cloned XDP RX-queue info before setup call */ memset(&rx_rings[i].xdp_rxq, 0, sizeof(rx_rings[i].xdp_rxq)); /* this is to allow wr32 to have something to write to * during early allocation of Rx buffers */ rx_rings[i].tail = hw->hw_addr + I40E_PRTGEN_STATUS; err = i40e_setup_rx_descriptors(&rx_rings[i]); if (err) goto rx_unwind; /* now allocate the Rx buffers to make sure the OS * has enough memory, any failure here means abort */ unused = I40E_DESC_UNUSED(&rx_rings[i]); err = i40e_alloc_rx_buffers(&rx_rings[i], unused); rx_unwind: if (err) { do { i40e_free_rx_resources(&rx_rings[i]); } while (i--); kfree(rx_rings); rx_rings = NULL; goto free_tx; } } } /* Bring interface down, copy in the new ring info, * then restore the interface */ i40e_down(vsi); if (tx_rings) { for (i = 0; i < tx_alloc_queue_pairs; i++) { if (i40e_active_tx_ring_index(vsi, i)) { 
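				/* Free the old ring's descriptor memory and
				 * buffers before the struct copy below
				 * overwrites its pointers with the newly
				 * allocated replacement ring.
				 */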
i40e_free_tx_resources(vsi->tx_rings[i]); *vsi->tx_rings[i] = tx_rings[i]; } } kfree(tx_rings); tx_rings = NULL; } if (rx_rings) { for (i = 0; i < vsi->num_queue_pairs; i++) { i40e_free_rx_resources(vsi->rx_rings[i]); /* get the real tail offset */ rx_rings[i].tail = vsi->rx_rings[i]->tail; /* this is to fake out the allocation routine * into thinking it has to realloc everything * but the recycling logic will let us re-use * the buffers allocated above */ rx_rings[i].next_to_use = 0; rx_rings[i].next_to_clean = 0; rx_rings[i].next_to_alloc = 0; /* do a struct copy */ *vsi->rx_rings[i] = rx_rings[i]; } kfree(rx_rings); rx_rings = NULL; } vsi->num_tx_desc = new_tx_count; vsi->num_rx_desc = new_rx_count; i40e_up(vsi); free_tx: /* error cleanup if the Rx allocations failed after getting Tx */ if (tx_rings) { for (i = 0; i < tx_alloc_queue_pairs; i++) { if (i40e_active_tx_ring_index(vsi, i)) i40e_free_tx_resources(vsi->tx_rings[i]); } kfree(tx_rings); tx_rings = NULL; } done: clear_bit(__I40E_CONFIG_BUSY, pf->state); return err; } /** * i40e_get_stats_count - return the stats count for a device * @netdev: the netdev to return the count for * * Returns the total number of statistics for this netdev. Note that even * though this is a function, it is required that the count for a specific * netdev must never change. Basing the count on static values such as the * maximum number of queues or the device type is ok. However, the API for * obtaining stats is *not* safe against changes based on non-static * values such as the *current* number of queues, or runtime flags. * * If a statistic is not always enabled, return it as part of the count * anyways, always return its string, and report its value as zero. **/ static int i40e_get_stats_count(struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; int stats_len; if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) stats_len = I40E_PF_STATS_LEN; else stats_len = I40E_VSI_STATS_LEN; /* The number of stats reported for a given net_device must remain * constant throughout the life of that device. * * This is because the API for obtaining the size, strings, and stats * is spread out over three separate ethtool ioctls. There is no safe * way to lock the number of stats across these calls, so we must * assume that they will never change. * * Due to this, we report the maximum number of queues, even if not * every queue is currently configured. Since we always allocate * queues in pairs, we'll just use netdev->num_tx_queues * 2. This * works because the num_tx_queues is set at device creation and never * changes. */ stats_len += I40E_QUEUE_STATS_LEN * 2 * netdev->num_tx_queues; return stats_len; } static int i40e_get_sset_count(struct net_device *netdev, int sset) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; switch (sset) { case ETH_SS_TEST: return I40E_TEST_LEN; case ETH_SS_STATS: return i40e_get_stats_count(netdev); case ETH_SS_PRIV_FLAGS: return I40E_PRIV_FLAGS_STR_LEN + (pf->hw.pf_id == 0 ? I40E_GL_PRIV_FLAGS_STR_LEN : 0); default: return -EOPNOTSUPP; } } /** * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure * @tc: the TC statistics in VEB structure (veb->tc_stats) * @i: the index of traffic class in (veb->tc_stats) structure to copy * * Copy VEB TC statistics from structure of arrays (veb->tc_stats) to * one dimensional structure i40e_cp_veb_tc_stats. 
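 * Much like the PFC stats helper below, the copy exists so that
 * i40e_add_ethtool_stats() can be handed a flat structure whose members line
 * up with the stat string definitions, rather than the per-TC arrays kept in
 * veb->tc_stats.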
* Produce formatted i40e_cp_veb_tc_stats structure of the VEB TC * statistics for the given TC. **/ static struct i40e_cp_veb_tc_stats i40e_get_veb_tc_stats(struct i40e_veb_tc_stats *tc, unsigned int i) { struct i40e_cp_veb_tc_stats veb_tc = { .tc_rx_packets = tc->tc_rx_packets[i], .tc_rx_bytes = tc->tc_rx_bytes[i], .tc_tx_packets = tc->tc_tx_packets[i], .tc_tx_bytes = tc->tc_tx_bytes[i], }; return veb_tc; } /** * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure * @pf: the PF device structure * @i: the priority value to copy * * The PFC stats are found as arrays in pf->stats, which is not easy to pass * into i40e_add_ethtool_stats. Produce a formatted i40e_pfc_stats structure * of the PFC stats for the given priority. **/ static inline struct i40e_pfc_stats i40e_get_pfc_stats(struct i40e_pf *pf, unsigned int i) { #define I40E_GET_PFC_STAT(stat, priority) \ .stat = pf->stats.stat[priority] struct i40e_pfc_stats pfc = { I40E_GET_PFC_STAT(priority_xon_rx, i), I40E_GET_PFC_STAT(priority_xoff_rx, i), I40E_GET_PFC_STAT(priority_xon_tx, i), I40E_GET_PFC_STAT(priority_xoff_tx, i), I40E_GET_PFC_STAT(priority_xon_2_xoff, i), }; return pfc; } /** * i40e_get_ethtool_stats - copy stat values into supplied buffer * @netdev: the netdev to collect stats for * @stats: ethtool stats command structure * @data: ethtool supplied buffer * * Copy the stats values for this netdev into the buffer. Expects data to be * pre-allocated to the size returned by i40e_get_stats_count.. Note that all * statistics must be copied in a static order, and the count must not change * for a given netdev. See i40e_get_stats_count for more details. * * If a statistic is not currently valid (such as a disabled queue), this * function reports its value as zero. **/ static void i40e_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_veb *veb = NULL; unsigned int i; bool veb_stats; u64 *p = data; i40e_update_stats(vsi); i40e_add_ethtool_stats(&data, i40e_get_vsi_stats_struct(vsi), i40e_gstrings_net_stats); i40e_add_ethtool_stats(&data, vsi, i40e_gstrings_misc_stats); rcu_read_lock(); for (i = 0; i < netdev->num_tx_queues; i++) { i40e_add_queue_stats(&data, READ_ONCE(vsi->tx_rings[i])); i40e_add_queue_stats(&data, READ_ONCE(vsi->rx_rings[i])); } rcu_read_unlock(); if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) goto check_data_pointer; veb_stats = ((pf->lan_veb != I40E_NO_VEB) && (pf->lan_veb < I40E_MAX_VEB) && (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)); if (veb_stats) { veb = pf->veb[pf->lan_veb]; i40e_update_veb_stats(veb); } /* If veb stats aren't enabled, pass NULL instead of the veb so that * we initialize stats to zero and update the data pointer * intelligently */ i40e_add_ethtool_stats(&data, veb_stats ? 
veb : NULL, i40e_gstrings_veb_stats); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) if (veb_stats) { struct i40e_cp_veb_tc_stats veb_tc = i40e_get_veb_tc_stats(&veb->tc_stats, i); i40e_add_ethtool_stats(&data, &veb_tc, i40e_gstrings_veb_tc_stats); } else { i40e_add_ethtool_stats(&data, NULL, i40e_gstrings_veb_tc_stats); } i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats); for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { struct i40e_pfc_stats pfc = i40e_get_pfc_stats(pf, i); i40e_add_ethtool_stats(&data, &pfc, i40e_gstrings_pfc_stats); } check_data_pointer: WARN_ONCE(data - p != i40e_get_stats_count(netdev), "ethtool stats count mismatch!"); } /** * i40e_get_stat_strings - copy stat strings into supplied buffer * @netdev: the netdev to collect strings for * @data: supplied buffer to copy strings into * * Copy the strings related to stats for this netdev. Expects data to be * pre-allocated with the size reported by i40e_get_stats_count. Note that the * strings must be copied in a static order and the total count must not * change for a given netdev. See i40e_get_stats_count for more details. **/ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; unsigned int i; u8 *p = data; i40e_add_stat_strings(&data, i40e_gstrings_net_stats); i40e_add_stat_strings(&data, i40e_gstrings_misc_stats); for (i = 0; i < netdev->num_tx_queues; i++) { i40e_add_stat_strings(&data, i40e_gstrings_queue_stats, "tx", i); i40e_add_stat_strings(&data, i40e_gstrings_queue_stats, "rx", i); } if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) goto check_data_pointer; i40e_add_stat_strings(&data, i40e_gstrings_veb_stats); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) i40e_add_stat_strings(&data, i40e_gstrings_veb_tc_stats, i); i40e_add_stat_strings(&data, i40e_gstrings_stats); for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i); check_data_pointer: WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, "stat strings count mismatch!"); } static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; unsigned int i; u8 *p = data; for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) ethtool_sprintf(&p, i40e_gstrings_priv_flags[i].flag_string); if (pf->hw.pf_id != 0) return; for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) ethtool_sprintf(&p, i40e_gl_gstrings_priv_flags[i].flag_string); } static void i40e_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { switch (stringset) { case ETH_SS_TEST: memcpy(data, i40e_gstrings_test, I40E_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_STATS: i40e_get_stat_strings(netdev, data); break; case ETH_SS_PRIV_FLAGS: i40e_get_priv_flag_strings(netdev, data); break; default: break; } } static int i40e_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { struct i40e_pf *pf = i40e_netdev_to_pf(dev); /* only report HW timestamping if PTP is enabled */ if (!(pf->flags & I40E_FLAG_PTP)) return ethtool_op_get_ts_info(dev, info); info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (pf->ptp_clock) info->phc_index = ptp_clock_index(pf->ptp_clock); else info->phc_index = -1; info->tx_types = 
BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ); if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE) info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); return 0; } static u64 i40e_link_test(struct net_device *netdev, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; bool link_up = false; int status; netif_info(pf, hw, netdev, "link test\n"); status = i40e_get_link_status(&pf->hw, &link_up); if (status) { netif_err(pf, drv, netdev, "link query timed out, please retry test\n"); *data = 1; return *data; } if (link_up) *data = 0; else *data = 1; return *data; } static u64 i40e_reg_test(struct net_device *netdev, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; netif_info(pf, hw, netdev, "register test\n"); *data = i40e_diag_reg_test(&pf->hw); return *data; } static u64 i40e_eeprom_test(struct net_device *netdev, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; netif_info(pf, hw, netdev, "eeprom test\n"); *data = i40e_diag_eeprom_test(&pf->hw); /* forcebly clear the NVM Update state machine */ pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT; return *data; } static u64 i40e_intr_test(struct net_device *netdev, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; u16 swc_old = pf->sw_int_count; netif_info(pf, hw, netdev, "interrupt test\n"); wr32(&pf->hw, I40E_PFINT_DYN_CTL0, (I40E_PFINT_DYN_CTL0_INTENA_MASK | I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK | I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK)); usleep_range(1000, 2000); *data = (swc_old == pf->sw_int_count); return *data; } static inline bool i40e_active_vfs(struct i40e_pf *pf) { struct i40e_vf *vfs = pf->vf; int i; for (i = 0; i < pf->num_alloc_vfs; i++) if (test_bit(I40E_VF_STATE_ACTIVE, &vfs[i].vf_states)) return true; return false; } static inline bool i40e_active_vmdqs(struct i40e_pf *pf) { return !!i40e_find_vsi_by_type(pf, I40E_VSI_VMDQ2); } static void i40e_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); bool if_running = netif_running(netdev); struct i40e_pf *pf = np->vsi->back; if (eth_test->flags == ETH_TEST_FL_OFFLINE) { /* Offline tests */ netif_info(pf, drv, netdev, "offline testing starting\n"); set_bit(__I40E_TESTING, pf->state); if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { dev_warn(&pf->pdev->dev, "Cannot start offline testing when PF is in reset state.\n"); goto skip_ol_tests; } if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) { dev_warn(&pf->pdev->dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n"); goto skip_ol_tests; } /* If the device is online then take it offline */ if (if_running) /* indicate we're in test mode */ i40e_close(netdev); else /* This reset does not affect link - if it is * changed to a type of reset that does affect * 
link then the following link test would have * to be moved to before the reset */ i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true); if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK])) eth_test->flags |= ETH_TEST_FL_FAILED; if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM])) eth_test->flags |= ETH_TEST_FL_FAILED; if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR])) eth_test->flags |= ETH_TEST_FL_FAILED; /* run reg test last, a reset is required after it */ if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG])) eth_test->flags |= ETH_TEST_FL_FAILED; clear_bit(__I40E_TESTING, pf->state); i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true); if (if_running) i40e_open(netdev); } else { /* Online tests */ netif_info(pf, drv, netdev, "online testing starting\n"); if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK])) eth_test->flags |= ETH_TEST_FL_FAILED; /* Offline only tests, not run in online; pass by default */ data[I40E_ETH_TEST_REG] = 0; data[I40E_ETH_TEST_EEPROM] = 0; data[I40E_ETH_TEST_INTR] = 0; } netif_info(pf, drv, netdev, "testing finished\n"); return; skip_ol_tests: data[I40E_ETH_TEST_REG] = 1; data[I40E_ETH_TEST_EEPROM] = 1; data[I40E_ETH_TEST_INTR] = 1; data[I40E_ETH_TEST_LINK] = 1; eth_test->flags |= ETH_TEST_FL_FAILED; clear_bit(__I40E_TESTING, pf->state); netif_info(pf, drv, netdev, "testing failed\n"); } static void i40e_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; u16 wol_nvm_bits; /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) { wol->supported = 0; wol->wolopts = 0; } else { wol->supported = WAKE_MAGIC; wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0); } } /** * i40e_set_wol - set the WakeOnLAN configuration * @netdev: the netdev in question * @wol: the ethtool WoL setting data **/ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_vsi *vsi = np->vsi; struct i40e_hw *hw = &pf->hw; u16 wol_nvm_bits; /* WoL not supported if this isn't the controlling PF on the port */ if (hw->partition_id != 1) { i40e_partition_setting_complaint(pf); return -EOPNOTSUPP; } if (vsi != pf->vsi[pf->lan_vsi]) return -EOPNOTSUPP; /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); if (BIT(hw->port) & wol_nvm_bits) return -EOPNOTSUPP; /* only magic packet is supported */ if (wol->wolopts & ~WAKE_MAGIC) return -EOPNOTSUPP; /* is this a new value? 
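	 * Only touch the stored PF setting and the PM core wakeup flag when
	 * the requested value actually differs from what is already set.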
*/ if (pf->wol_en != !!wol->wolopts) { pf->wol_en = !!wol->wolopts; device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); } return 0; } static int i40e_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; int blink_freq = 2; u16 temp_status; int ret = 0; switch (state) { case ETHTOOL_ID_ACTIVE: if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) { pf->led_status = i40e_led_get(hw); } else { if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL); ret = i40e_led_get_phy(hw, &temp_status, &pf->phy_led_val); pf->led_status = temp_status; } return blink_freq; case ETHTOOL_ID_ON: if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) i40e_led_set(hw, 0xf, false); else ret = i40e_led_set_phy(hw, true, pf->led_status, 0); break; case ETHTOOL_ID_OFF: if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) i40e_led_set(hw, 0x0, false); else ret = i40e_led_set_phy(hw, false, pf->led_status, 0); break; case ETHTOOL_ID_INACTIVE: if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) { i40e_led_set(hw, pf->led_status, false); } else { ret = i40e_led_set_phy(hw, false, pf->led_status, (pf->phy_led_val | I40E_PHY_LED_MODE_ORIG)); if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) i40e_aq_set_phy_debug(hw, 0, NULL); } break; default: break; } if (ret) return -ENOENT; else return 0; } /* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt * Throttle Rate (ITR) ie. ITR(1) = 2us ITR(10) = 20 us, and also * 125us (8000 interrupts per second) == ITR(62) */ /** * __i40e_get_coalesce - get per-queue coalesce settings * @netdev: the netdev to check * @ec: ethtool coalesce data structure * @queue: which queue to pick * * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs * are per queue. If queue is <0 then we default to queue 0 as the * representative value. **/ static int __i40e_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, int queue) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_ring *rx_ring, *tx_ring; struct i40e_vsi *vsi = np->vsi; ec->tx_max_coalesced_frames_irq = vsi->work_limit; ec->rx_max_coalesced_frames_irq = vsi->work_limit; /* rx and tx usecs has per queue value. If user doesn't specify the * queue, return queue 0's value to represent. */ if (queue < 0) queue = 0; else if (queue >= vsi->num_queue_pairs) return -EINVAL; rx_ring = vsi->rx_rings[queue]; tx_ring = vsi->tx_rings[queue]; if (ITR_IS_DYNAMIC(rx_ring->itr_setting)) ec->use_adaptive_rx_coalesce = 1; if (ITR_IS_DYNAMIC(tx_ring->itr_setting)) ec->use_adaptive_tx_coalesce = 1; ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC; ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC; /* we use the _usecs_high to store/set the interrupt rate limit * that the hardware supports, that almost but not quite * fits the original intent of the ethtool variable, * the rx_coalesce_usecs_high limits total interrupts * per second from both tx/rx sources. */ ec->rx_coalesce_usecs_high = vsi->int_rate_limit; ec->tx_coalesce_usecs_high = vsi->int_rate_limit; return 0; } /** * i40e_get_coalesce - get a netdev's coalesce settings * @netdev: the netdev to check * @ec: ethtool coalesce data structure * @kernel_coal: ethtool CQE mode setting structure * @extack: extack for reporting error messages * * Gets the coalesce settings for a particular netdev. 
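 *
 * As a concrete example of the ITR conversion note above: a reported or
 * requested rx-usecs of 50 corresponds to a hardware ITR count of 25, since
 * the hardware throttles interrupts in 2 usec units.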
 * Note that if the user has modified per-queue settings, this only
 * guarantees to represent queue 0. See __i40e_get_coalesce for more details.
 **/
static int i40e_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	return __i40e_get_coalesce(netdev, ec, -1);
}

/**
 * i40e_get_per_queue_coalesce - gets coalesce settings for particular queue
 * @netdev: netdev structure
 * @ec: ethtool's coalesce settings
 * @queue: the particular queue to read
 *
 * Will read a specific queue's coalesce settings
 **/
static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
				       struct ethtool_coalesce *ec)
{
	return __i40e_get_coalesce(netdev, ec, queue);
}

/**
 * i40e_set_itr_per_queue - set ITR values for specific queue
 * @vsi: the VSI to set values for
 * @ec: coalesce settings from ethtool
 * @queue: the queue to modify
 *
 * Change the ITR settings for a specific queue.
 **/
static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
				   struct ethtool_coalesce *ec,
				   int queue)
{
	struct i40e_ring *rx_ring = vsi->rx_rings[queue];
	struct i40e_ring *tx_ring = vsi->tx_rings[queue];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_q_vector *q_vector;
	u16 intrl;

	intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);

	rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
	tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);

	if (ec->use_adaptive_rx_coalesce)
		rx_ring->itr_setting |= I40E_ITR_DYNAMIC;
	else
		rx_ring->itr_setting &= ~I40E_ITR_DYNAMIC;

	if (ec->use_adaptive_tx_coalesce)
		tx_ring->itr_setting |= I40E_ITR_DYNAMIC;
	else
		tx_ring->itr_setting &= ~I40E_ITR_DYNAMIC;

	q_vector = rx_ring->q_vector;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);

	q_vector = tx_ring->q_vector;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);

	/* The interrupt handler itself will take care of programming
	 * the Tx and Rx ITR values based on the values we have entered
	 * into the q_vector, no need to write the values now.
	 */
	wr32(hw, I40E_PFINT_RATEN(q_vector->reg_idx), intrl);
	i40e_flush(hw);
}

/**
 * __i40e_set_coalesce - set coalesce settings for particular queue
 * @netdev: the netdev to change
 * @ec: ethtool coalesce settings
 * @queue: the queue to change
 *
 * Sets the coalesce settings for a particular queue.
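 *
 * The requested values are validated before anything is applied: tx-usecs-high
 * is rejected in favour of rx-usecs-high, both usecs values must fall within
 * the supported ITR range, and a fixed value cannot be programmed while the
 * matching adaptive (dynamic ITR) mode is enabled for that direction.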
**/ static int __i40e_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, int queue) { struct i40e_netdev_priv *np = netdev_priv(netdev); u16 intrl_reg, cur_rx_itr, cur_tx_itr; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; int i; if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) vsi->work_limit = ec->tx_max_coalesced_frames_irq; if (queue < 0) { cur_rx_itr = vsi->rx_rings[0]->itr_setting; cur_tx_itr = vsi->tx_rings[0]->itr_setting; } else if (queue < vsi->num_queue_pairs) { cur_rx_itr = vsi->rx_rings[queue]->itr_setting; cur_tx_itr = vsi->tx_rings[queue]->itr_setting; } else { netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", vsi->num_queue_pairs - 1); return -EINVAL; } cur_tx_itr &= ~I40E_ITR_DYNAMIC; cur_rx_itr &= ~I40E_ITR_DYNAMIC; /* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */ if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) { netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n"); return -EINVAL; } if (ec->rx_coalesce_usecs_high > INTRL_REG_TO_USEC(I40E_MAX_INTRL)) { netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-%lu\n", INTRL_REG_TO_USEC(I40E_MAX_INTRL)); return -EINVAL; } if (ec->rx_coalesce_usecs != cur_rx_itr && ec->use_adaptive_rx_coalesce) { netif_info(pf, drv, netdev, "RX interrupt moderation cannot be changed if adaptive-rx is enabled.\n"); return -EINVAL; } if (ec->rx_coalesce_usecs > I40E_MAX_ITR) { netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); return -EINVAL; } if (ec->tx_coalesce_usecs != cur_tx_itr && ec->use_adaptive_tx_coalesce) { netif_info(pf, drv, netdev, "TX interrupt moderation cannot be changed if adaptive-tx is enabled.\n"); return -EINVAL; } if (ec->tx_coalesce_usecs > I40E_MAX_ITR) { netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); return -EINVAL; } if (ec->use_adaptive_rx_coalesce && !cur_rx_itr) ec->rx_coalesce_usecs = I40E_MIN_ITR; if (ec->use_adaptive_tx_coalesce && !cur_tx_itr) ec->tx_coalesce_usecs = I40E_MIN_ITR; intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high); vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg); if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) { netif_info(pf, drv, netdev, "Interrupt rate limit rounded down to %d\n", vsi->int_rate_limit); } /* rx and tx usecs has per queue value. If user doesn't specify the * queue, apply to all queues. */ if (queue < 0) { for (i = 0; i < vsi->num_queue_pairs; i++) i40e_set_itr_per_queue(vsi, ec, i); } else { i40e_set_itr_per_queue(vsi, ec, queue); } return 0; } /** * i40e_set_coalesce - set coalesce settings for every queue on the netdev * @netdev: the netdev to change * @ec: ethtool coalesce settings * @kernel_coal: ethtool CQE mode setting structure * @extack: extack for reporting error messages * * This will set each queue to the same coalesce settings. **/ static int i40e_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { return __i40e_set_coalesce(netdev, ec, -1); } /** * i40e_set_per_queue_coalesce - set specific queue's coalesce settings * @netdev: the netdev to change * @ec: ethtool's coalesce settings * @queue: the queue to change * * Sets the specified queue's coalesce settings. 
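 *
 * The queue index is range checked inside __i40e_set_coalesce(), so a value
 * outside 0 .. num_queue_pairs - 1 is rejected with -EINVAL before any ring
 * settings are modified.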
**/ static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue, struct ethtool_coalesce *ec) { return __i40e_set_coalesce(netdev, ec, queue); } /** * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type * @pf: pointer to the physical function struct * @cmd: ethtool rxnfc command * * Returns Success if the flow is supported, else Invalid Input. **/ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) { struct i40e_hw *hw = &pf->hw; u8 flow_pctype = 0; u64 i_set = 0; cmd->data = 0; switch (cmd->flow_type) { case TCP_V4_FLOW: flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; break; case UDP_V4_FLOW: flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; break; case TCP_V6_FLOW: flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; break; case UDP_V6_FLOW: flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; break; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: case ESP_V4_FLOW: case IPV4_FLOW: case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: case IPV6_FLOW: /* Default is src/dest for IP, no matter the L4 hashing */ cmd->data |= RXH_IP_SRC | RXH_IP_DST; break; default: return -EINVAL; } /* Read flow based hash input set register */ if (flow_pctype) { i_set = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype)) | ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype)) << 32); } /* Process bits of hash input set */ if (i_set) { if (i_set & I40E_L4_SRC_MASK) cmd->data |= RXH_L4_B_0_1; if (i_set & I40E_L4_DST_MASK) cmd->data |= RXH_L4_B_2_3; if (cmd->flow_type == TCP_V4_FLOW || cmd->flow_type == UDP_V4_FLOW) { if (hw->mac.type == I40E_MAC_X722) { if (i_set & I40E_X722_L3_SRC_MASK) cmd->data |= RXH_IP_SRC; if (i_set & I40E_X722_L3_DST_MASK) cmd->data |= RXH_IP_DST; } else { if (i_set & I40E_L3_SRC_MASK) cmd->data |= RXH_IP_SRC; if (i_set & I40E_L3_DST_MASK) cmd->data |= RXH_IP_DST; } } else if (cmd->flow_type == TCP_V6_FLOW || cmd->flow_type == UDP_V6_FLOW) { if (i_set & I40E_L3_V6_SRC_MASK) cmd->data |= RXH_IP_SRC; if (i_set & I40E_L3_V6_DST_MASK) cmd->data |= RXH_IP_DST; } } return 0; } /** * i40e_check_mask - Check whether a mask field is set * @mask: the full mask value * @field: mask of the field to check * * If the given mask is fully set, return positive value. If the mask for the * field is fully unset, return zero. Otherwise return a negative error code. **/ static int i40e_check_mask(u64 mask, u64 field) { u64 value = mask & field; if (value == field) return 1; else if (!value) return 0; else return -1; } /** * i40e_parse_rx_flow_user_data - Deconstruct user-defined data * @fsp: pointer to rx flow specification * @data: pointer to userdef data structure for storage * * Read the user-defined data and deconstruct the value into a structure. No * other code should read the user-defined data, so as to ensure that every * place consistently reads the value correctly. * * The user-defined field is a 64bit Big Endian format value, which we * deconstruct by reading bits or bit fields from it. Single bit flags shall * be defined starting from the highest bits, while small bit field values * shall be defined starting from the lowest bits. * * Returns 0 if the data is valid, and non-zero if the userdef data is invalid * and the filter should be rejected. The data structure will always be * modified even if FLOW_EXT is not set. 
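 *
 * For example (illustrative values only): a user-def field of
 * 0x0000000000120034 with a fully set low 32-bit mask selects flex word
 * 0x0034 to be matched at flex offset 0x12, per the bit layout defined below.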
* **/ static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, struct i40e_rx_flow_userdef *data) { u64 value, mask; int valid; /* Zero memory first so it's always consistent. */ memset(data, 0, sizeof(*data)); if (!(fsp->flow_type & FLOW_EXT)) return 0; value = be64_to_cpu(*((__be64 *)fsp->h_ext.data)); mask = be64_to_cpu(*((__be64 *)fsp->m_ext.data)); #define I40E_USERDEF_FLEX_WORD GENMASK_ULL(15, 0) #define I40E_USERDEF_FLEX_OFFSET GENMASK_ULL(31, 16) #define I40E_USERDEF_FLEX_FILTER GENMASK_ULL(31, 0) valid = i40e_check_mask(mask, I40E_USERDEF_FLEX_FILTER); if (valid < 0) { return -EINVAL; } else if (valid) { data->flex_word = value & I40E_USERDEF_FLEX_WORD; data->flex_offset = (value & I40E_USERDEF_FLEX_OFFSET) >> 16; data->flex_filter = true; } return 0; } /** * i40e_fill_rx_flow_user_data - Fill in user-defined data field * @fsp: pointer to rx_flow specification * @data: pointer to return userdef data * * Reads the userdef data structure and properly fills in the user defined * fields of the rx_flow_spec. **/ static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, struct i40e_rx_flow_userdef *data) { u64 value = 0, mask = 0; if (data->flex_filter) { value |= data->flex_word; value |= (u64)data->flex_offset << 16; mask |= I40E_USERDEF_FLEX_FILTER; } if (value || mask) fsp->flow_type |= FLOW_EXT; *((__be64 *)fsp->h_ext.data) = cpu_to_be64(value); *((__be64 *)fsp->m_ext.data) = cpu_to_be64(mask); } /** * i40e_get_ethtool_fdir_all - Populates the rule count of a command * @pf: Pointer to the physical function struct * @cmd: The command to get or set Rx flow classification rules * @rule_locs: Array of used rule locations * * This function populates both the total and actual rule count of * the ethtool flow classification command * * Returns 0 on success or -EMSGSIZE if entry not found **/ static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf, struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct i40e_fdir_filter *rule; struct hlist_node *node2; int cnt = 0; /* report total rule count */ cmd->data = i40e_get_fd_cnt_all(pf); hlist_for_each_entry_safe(rule, node2, &pf->fdir_filter_list, fdir_node) { if (cnt == cmd->rule_cnt) return -EMSGSIZE; rule_locs[cnt] = rule->fd_id; cnt++; } cmd->rule_cnt = cnt; return 0; } /** * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow * @pf: Pointer to the physical function struct * @cmd: The command to get or set Rx flow classification rules * * This function looks up a filter based on the Rx flow classification * command and fills the flow spec info for it if found * * Returns 0 on success or -EINVAL if filter not found **/ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) { struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; struct i40e_rx_flow_userdef userdef = {0}; struct i40e_fdir_filter *rule = NULL; struct hlist_node *node2; u64 input_set; u16 index; hlist_for_each_entry_safe(rule, node2, &pf->fdir_filter_list, fdir_node) { if (fsp->location <= rule->fd_id) break; } if (!rule || fsp->location != rule->fd_id) return -EINVAL; fsp->flow_type = rule->flow_type; if (fsp->flow_type == IP_USER_FLOW) { fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; fsp->h_u.usr_ip4_spec.proto = 0; fsp->m_u.usr_ip4_spec.proto = 0; } if (fsp->flow_type == IPV6_USER_FLOW || fsp->flow_type == UDP_V6_FLOW || fsp->flow_type == TCP_V6_FLOW || fsp->flow_type == SCTP_V6_FLOW) { /* Reverse the src and dest notion, since the HW views them * from Tx perspective where as the user 
expects it from * Rx filter view. */ fsp->h_u.tcp_ip6_spec.psrc = rule->dst_port; fsp->h_u.tcp_ip6_spec.pdst = rule->src_port; memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->src_ip6, sizeof(__be32) * 4); memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->dst_ip6, sizeof(__be32) * 4); } else { /* Reverse the src and dest notion, since the HW views them * from Tx perspective where as the user expects it from * Rx filter view. */ fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port; fsp->h_u.tcp_ip4_spec.pdst = rule->src_port; fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip; fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip; } switch (rule->flow_type) { case SCTP_V4_FLOW: index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP; break; case TCP_V4_FLOW: index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; break; case UDP_V4_FLOW: index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; break; case SCTP_V6_FLOW: index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP; break; case TCP_V6_FLOW: index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; break; case UDP_V6_FLOW: index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; break; case IP_USER_FLOW: index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; break; case IPV6_USER_FLOW: index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER; break; default: /* If we have stored a filter with a flow type not listed here * it is almost certainly a driver bug. WARN(), and then * assign the input_set as if all fields are enabled to avoid * reading unassigned memory. */ WARN(1, "Missing input set index for flow_type %d\n", rule->flow_type); input_set = 0xFFFFFFFFFFFFFFFFULL; goto no_input_set; } input_set = i40e_read_fd_input_set(pf, index); no_input_set: if (input_set & I40E_L3_V6_SRC_MASK) { fsp->m_u.tcp_ip6_spec.ip6src[0] = htonl(0xFFFFFFFF); fsp->m_u.tcp_ip6_spec.ip6src[1] = htonl(0xFFFFFFFF); fsp->m_u.tcp_ip6_spec.ip6src[2] = htonl(0xFFFFFFFF); fsp->m_u.tcp_ip6_spec.ip6src[3] = htonl(0xFFFFFFFF); } if (input_set & I40E_L3_V6_DST_MASK) { fsp->m_u.tcp_ip6_spec.ip6dst[0] = htonl(0xFFFFFFFF); fsp->m_u.tcp_ip6_spec.ip6dst[1] = htonl(0xFFFFFFFF); fsp->m_u.tcp_ip6_spec.ip6dst[2] = htonl(0xFFFFFFFF); fsp->m_u.tcp_ip6_spec.ip6dst[3] = htonl(0xFFFFFFFF); } if (input_set & I40E_L3_SRC_MASK) fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF); if (input_set & I40E_L3_DST_MASK) fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF); if (input_set & I40E_L4_SRC_MASK) fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF); if (input_set & I40E_L4_DST_MASK) fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF); if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET) fsp->ring_cookie = RX_CLS_FLOW_DISC; else fsp->ring_cookie = rule->q_index; if (rule->vlan_tag) { fsp->h_ext.vlan_etype = rule->vlan_etype; fsp->m_ext.vlan_etype = htons(0xFFFF); fsp->h_ext.vlan_tci = rule->vlan_tag; fsp->m_ext.vlan_tci = htons(0xFFFF); fsp->flow_type |= FLOW_EXT; } if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) { struct i40e_vsi *vsi; vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi); if (vsi && vsi->type == I40E_VSI_SRIOV) { /* VFs are zero-indexed by the driver, but ethtool * expects them to be one-indexed, so add one here */ u64 ring_vf = vsi->vf_id + 1; ring_vf <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; fsp->ring_cookie |= ring_vf; } } if (rule->flex_filter) { userdef.flex_filter = true; userdef.flex_word = be16_to_cpu(rule->flex_word); userdef.flex_offset = rule->flex_offset; } i40e_fill_rx_flow_user_data(fsp, &userdef); return 0; } /** * i40e_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command * @rule_locs: pointer to store rule data * * Returns Success if the 
command is supported. **/ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; int ret = -EOPNOTSUPP; switch (cmd->cmd) { case ETHTOOL_GRXRINGS: cmd->data = vsi->rss_size; ret = 0; break; case ETHTOOL_GRXFH: ret = i40e_get_rss_hash_opts(pf, cmd); break; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = pf->fdir_pf_active_filters; /* report total rule count */ cmd->data = i40e_get_fd_cnt_all(pf); ret = 0; break; case ETHTOOL_GRXCLSRULE: ret = i40e_get_ethtool_fdir_entry(pf, cmd); break; case ETHTOOL_GRXCLSRLALL: ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs); break; default: break; } return ret; } /** * i40e_get_rss_hash_bits - Read RSS Hash bits from register * @hw: hw structure * @nfc: pointer to user request * @i_setc: bits currently set * * Returns value of bits to be set per user request **/ static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw, struct ethtool_rxnfc *nfc, u64 i_setc) { u64 i_set = i_setc; u64 src_l3 = 0, dst_l3 = 0; if (nfc->data & RXH_L4_B_0_1) i_set |= I40E_L4_SRC_MASK; else i_set &= ~I40E_L4_SRC_MASK; if (nfc->data & RXH_L4_B_2_3) i_set |= I40E_L4_DST_MASK; else i_set &= ~I40E_L4_DST_MASK; if (nfc->flow_type == TCP_V6_FLOW || nfc->flow_type == UDP_V6_FLOW) { src_l3 = I40E_L3_V6_SRC_MASK; dst_l3 = I40E_L3_V6_DST_MASK; } else if (nfc->flow_type == TCP_V4_FLOW || nfc->flow_type == UDP_V4_FLOW) { if (hw->mac.type == I40E_MAC_X722) { src_l3 = I40E_X722_L3_SRC_MASK; dst_l3 = I40E_X722_L3_DST_MASK; } else { src_l3 = I40E_L3_SRC_MASK; dst_l3 = I40E_L3_DST_MASK; } } else { /* Any other flow type are not supported here */ return i_set; } if (nfc->data & RXH_IP_SRC) i_set |= src_l3; else i_set &= ~src_l3; if (nfc->data & RXH_IP_DST) i_set |= dst_l3; else i_set &= ~dst_l3; return i_set; } #define FLOW_PCTYPES_SIZE 64 /** * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash * @pf: pointer to the physical function struct * @nfc: ethtool rxnfc command * * Returns Success if the flow input set is supported. 
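 *
 * The RXH_* bits in nfc->data are translated by i40e_get_rss_hash_bits()
 * above: RXH_IP_SRC/RXH_IP_DST toggle the L3 source/destination address
 * masks and RXH_L4_B_0_1/RXH_L4_B_2_3 toggle the L4 source/destination port
 * masks in the per-PCTYPE hash input set before it is written back.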
**/ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) { struct i40e_hw *hw = &pf->hw; u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); DECLARE_BITMAP(flow_pctypes, FLOW_PCTYPES_SIZE); u64 i_set, i_setc; bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE); if (pf->flags & I40E_FLAG_MFP_ENABLED) { dev_err(&pf->pdev->dev, "Change of RSS hash input set is not supported when MFP mode is enabled\n"); return -EOPNOTSUPP; } /* RSS does not support anything other than hashing * to queues on src and dst IPs and ports */ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) return -EINVAL; switch (nfc->flow_type) { case TCP_V4_FLOW: set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes); if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK, flow_pctypes); break; case TCP_V6_FLOW: set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes); if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK, flow_pctypes); break; case UDP_V4_FLOW: set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes); if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP, flow_pctypes); set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP, flow_pctypes); } hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4); break; case UDP_V6_FLOW: set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes); if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP, flow_pctypes); set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP, flow_pctypes); } hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6); break; case AH_ESP_V4_FLOW: case AH_V4_FLOW: case ESP_V4_FLOW: case SCTP_V4_FLOW: if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); break; case AH_ESP_V6_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: case SCTP_V6_FLOW: if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); break; case IPV4_FLOW: hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4); break; case IPV6_FLOW: hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6); break; default: return -EINVAL; } if (bitmap_weight(flow_pctypes, FLOW_PCTYPES_SIZE)) { u8 flow_id; for_each_set_bit(flow_id, flow_pctypes, FLOW_PCTYPES_SIZE) { i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id)) | ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id)) << 32); i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc); i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id), (u32)i_set); i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id), (u32)(i_set >> 32)); hena |= BIT_ULL(flow_id); } } i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); i40e_flush(hw); return 0; } /** * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry * @vsi: Pointer to the targeted VSI * @input: The filter to update or NULL to indicate deletion * @sw_idx: Software index to the filter * @cmd: The command to get or set Rx flow classification rules * * This function updates (or deletes) a Flow Director entry from * the hlist of the corresponding PF * * Returns 0 on success **/ static int 
i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi, struct i40e_fdir_filter *input, u16 sw_idx, struct ethtool_rxnfc *cmd) { struct i40e_fdir_filter *rule, *parent; struct i40e_pf *pf = vsi->back; struct hlist_node *node2; int err = -EINVAL; parent = NULL; rule = NULL; hlist_for_each_entry_safe(rule, node2, &pf->fdir_filter_list, fdir_node) { /* hash found, or no matching entry */ if (rule->fd_id >= sw_idx) break; parent = rule; } /* if there is an old rule occupying our place remove it */ if (rule && (rule->fd_id == sw_idx)) { /* Remove this rule, since we're either deleting it, or * replacing it. */ err = i40e_add_del_fdir(vsi, rule, false); hlist_del(&rule->fdir_node); kfree(rule); pf->fdir_pf_active_filters--; } /* If we weren't given an input, this is a delete, so just return the * error code indicating if there was an entry at the requested slot */ if (!input) return err; /* Otherwise, install the new rule as requested */ INIT_HLIST_NODE(&input->fdir_node); /* add filter to the list */ if (parent) hlist_add_behind(&input->fdir_node, &parent->fdir_node); else hlist_add_head(&input->fdir_node, &pf->fdir_filter_list); /* update counts */ pf->fdir_pf_active_filters++; return 0; } /** * i40e_prune_flex_pit_list - Cleanup unused entries in FLX_PIT table * @pf: pointer to PF structure * * This function searches the list of filters and determines which FLX_PIT * entries are still required. It will prune any entries which are no longer * in use after the deletion. **/ static void i40e_prune_flex_pit_list(struct i40e_pf *pf) { struct i40e_flex_pit *entry, *tmp; struct i40e_fdir_filter *rule; /* First, we'll check the l3 table */ list_for_each_entry_safe(entry, tmp, &pf->l3_flex_pit_list, list) { bool found = false; hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) { if (rule->flow_type != IP_USER_FLOW) continue; if (rule->flex_filter && rule->flex_offset == entry->src_offset) { found = true; break; } } /* If we didn't find the filter, then we can prune this entry * from the list. */ if (!found) { list_del(&entry->list); kfree(entry); } } /* Followed by the L4 table */ list_for_each_entry_safe(entry, tmp, &pf->l4_flex_pit_list, list) { bool found = false; hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) { /* Skip this filter if it's L3, since we already * checked those in the above loop */ if (rule->flow_type == IP_USER_FLOW) continue; if (rule->flex_filter && rule->flex_offset == entry->src_offset) { found = true; break; } } /* If we didn't find the filter, then we can prune this entry * from the list. 
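		 * Pruning matters because the flexible PIT table only holds
		 * I40E_FLEX_PIT_TABLE_SIZE offsets, so stale entries would
		 * eventually keep new flexible filters from being programmed.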
*/ if (!found) { list_del(&entry->list); kfree(entry); } } } /** * i40e_del_fdir_entry - Deletes a Flow Director filter entry * @vsi: Pointer to the targeted VSI * @cmd: The command to get or set Rx flow classification rules * * The function removes a Flow Director filter entry from the * hlist of the corresponding PF * * Returns 0 on success */ static int i40e_del_fdir_entry(struct i40e_vsi *vsi, struct ethtool_rxnfc *cmd) { struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; struct i40e_pf *pf = vsi->back; int ret = 0; if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) return -EBUSY; if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) return -EBUSY; ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd); i40e_prune_flex_pit_list(pf); i40e_fdir_check_and_reenable(pf); return ret; } /** * i40e_unused_pit_index - Find an unused PIT index for given list * @pf: the PF data structure * * Find the first unused flexible PIT index entry. We search both the L3 and * L4 flexible PIT lists so that the returned index is unique and unused by * either currently programmed L3 or L4 filters. We use a bit field as storage * to track which indexes are already used. **/ static u8 i40e_unused_pit_index(struct i40e_pf *pf) { unsigned long available_index = 0xFF; struct i40e_flex_pit *entry; /* We need to make sure that the new index isn't in use by either L3 * or L4 filters so that IP_USER_FLOW filters can program both L3 and * L4 to use the same index. */ list_for_each_entry(entry, &pf->l4_flex_pit_list, list) clear_bit(entry->pit_index, &available_index); list_for_each_entry(entry, &pf->l3_flex_pit_list, list) clear_bit(entry->pit_index, &available_index); return find_first_bit(&available_index, 8); } /** * i40e_find_flex_offset - Find an existing flex src_offset * @flex_pit_list: L3 or L4 flex PIT list * @src_offset: new src_offset to find * * Searches the flex_pit_list for an existing offset. If no offset is * currently programmed, then this will return an ERR_PTR if there is no space * to add a new offset, otherwise it returns NULL. **/ static struct i40e_flex_pit *i40e_find_flex_offset(struct list_head *flex_pit_list, u16 src_offset) { struct i40e_flex_pit *entry; int size = 0; /* Search for the src_offset first. If we find a matching entry * already programmed, we can simply re-use it. */ list_for_each_entry(entry, flex_pit_list, list) { size++; if (entry->src_offset == src_offset) return entry; } /* If we haven't found an entry yet, then the provided src offset has * not yet been programmed. We will program the src offset later on, * but we need to indicate whether there is enough space to do so * here. We'll make use of ERR_PTR for this purpose. */ if (size >= I40E_FLEX_PIT_TABLE_SIZE) return ERR_PTR(-ENOSPC); return NULL; } /** * i40e_add_flex_offset - Add src_offset to flex PIT table list * @flex_pit_list: L3 or L4 flex PIT list * @src_offset: new src_offset to add * @pit_index: the PIT index to program * * This function programs the new src_offset to the list. It is expected that * i40e_find_flex_offset has already been tried and returned NULL, indicating * that this offset is not programmed, and that the list has enough space to * store another offset. * * Returns 0 on success, and negative value on error. 
**/ static int i40e_add_flex_offset(struct list_head *flex_pit_list, u16 src_offset, u8 pit_index) { struct i40e_flex_pit *new_pit, *entry; new_pit = kzalloc(sizeof(*entry), GFP_KERNEL); if (!new_pit) return -ENOMEM; new_pit->src_offset = src_offset; new_pit->pit_index = pit_index; /* We need to insert this item such that the list is sorted by * src_offset in ascending order. */ list_for_each_entry(entry, flex_pit_list, list) { if (new_pit->src_offset < entry->src_offset) { list_add_tail(&new_pit->list, &entry->list); return 0; } /* If we found an entry with our offset already programmed we * can simply return here, after freeing the memory. However, * if the pit_index does not match we need to report an error. */ if (new_pit->src_offset == entry->src_offset) { int err = 0; /* If the PIT index is not the same we can't re-use * the entry, so we must report an error. */ if (new_pit->pit_index != entry->pit_index) err = -EINVAL; kfree(new_pit); return err; } } /* If we reached here, then we haven't yet added the item. This means * that we should add the item at the end of the list. */ list_add_tail(&new_pit->list, flex_pit_list); return 0; } /** * __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table * @pf: Pointer to the PF structure * @flex_pit_list: list of flexible src offsets in use * @flex_pit_start: index to first entry for this section of the table * * In order to handle flexible data, the hardware uses a table of values * called the FLX_PIT table. This table is used to indicate which sections of * the input correspond to what PIT index values. Unfortunately, hardware is * very restrictive about programming this table. Entries must be ordered by * src_offset in ascending order, without duplicates. Additionally, unused * entries must be set to the unused index value, and must have valid size and * length according to the src_offset ordering. * * This function will reprogram the FLX_PIT register from a book-keeping * structure that we guarantee is already ordered correctly, and has no more * than 3 entries. * * To make things easier, we only support flexible values of one word length, * rather than allowing variable length flexible values. **/ static void __i40e_reprogram_flex_pit(struct i40e_pf *pf, struct list_head *flex_pit_list, int flex_pit_start) { struct i40e_flex_pit *entry = NULL; u16 last_offset = 0; int i = 0, j = 0; /* First, loop over the list of flex PIT entries, and reprogram the * registers. */ list_for_each_entry(entry, flex_pit_list, list) { /* We have to be careful when programming values for the * largest SRC_OFFSET value. It is possible that adding * additional empty values at the end would overflow the space * for the SRC_OFFSET in the FLX_PIT register. To avoid this, * we check here and add the empty values prior to adding the * largest value. * * To determine this, we will use a loop from i+1 to 3, which * will determine whether the unused entries would have valid * SRC_OFFSET. Note that there cannot be extra entries past * this value, because the only valid values would have been * larger than I40E_MAX_FLEX_SRC_OFFSET, and thus would not * have been added to the list in the first place. 
*/ for (j = i + 1; j < 3; j++) { u16 offset = entry->src_offset + j; int index = flex_pit_start + i; u32 value = I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED, 1, offset - 3); if (offset > I40E_MAX_FLEX_SRC_OFFSET) { i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(index), value); i++; } } /* Now, we can program the actual value into the table */ i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(flex_pit_start + i), I40E_FLEX_PREP_VAL(entry->pit_index + 50, 1, entry->src_offset)); i++; } /* In order to program the last entries in the table, we need to * determine the valid offset. If the list is empty, we'll just start * with 0. Otherwise, we'll start with the last item offset and add 1. * This ensures that all entries have valid sizes. If we don't do this * correctly, the hardware will disable flexible field parsing. */ if (!list_empty(flex_pit_list)) last_offset = list_prev_entry(entry, list)->src_offset + 1; for (; i < 3; i++, last_offset++) { i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(flex_pit_start + i), I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED, 1, last_offset)); } } /** * i40e_reprogram_flex_pit - Reprogram all FLX_PIT tables after input set change * @pf: pointer to the PF structure * * This function reprograms both the L3 and L4 FLX_PIT tables. See the * internal helper function for implementation details. **/ static void i40e_reprogram_flex_pit(struct i40e_pf *pf) { __i40e_reprogram_flex_pit(pf, &pf->l3_flex_pit_list, I40E_FLEX_PIT_IDX_START_L3); __i40e_reprogram_flex_pit(pf, &pf->l4_flex_pit_list, I40E_FLEX_PIT_IDX_START_L4); /* We also need to program the L3 and L4 GLQF ORT register */ i40e_write_rx_ctl(&pf->hw, I40E_GLQF_ORT(I40E_L3_GLQF_ORT_IDX), I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L3, 3, 1)); i40e_write_rx_ctl(&pf->hw, I40E_GLQF_ORT(I40E_L4_GLQF_ORT_IDX), I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L4, 3, 1)); } /** * i40e_flow_str - Converts a flow_type into a human readable string * @fsp: the flow specification * * Currently only flow types we support are included here, and the string * value attempts to match what ethtool would use to configure this flow type. **/ static const char *i40e_flow_str(struct ethtool_rx_flow_spec *fsp) { switch (fsp->flow_type & ~FLOW_EXT) { case TCP_V4_FLOW: return "tcp4"; case UDP_V4_FLOW: return "udp4"; case SCTP_V4_FLOW: return "sctp4"; case IP_USER_FLOW: return "ip4"; case TCP_V6_FLOW: return "tcp6"; case UDP_V6_FLOW: return "udp6"; case SCTP_V6_FLOW: return "sctp6"; case IPV6_USER_FLOW: return "ip6"; default: return "unknown"; } } /** * i40e_pit_index_to_mask - Return the FLEX mask for a given PIT index * @pit_index: PIT index to convert * * Returns the mask for a given PIT index. Will return 0 if the pit_index is * out of range. **/ static u64 i40e_pit_index_to_mask(int pit_index) { switch (pit_index) { case 0: return I40E_FLEX_50_MASK; case 1: return I40E_FLEX_51_MASK; case 2: return I40E_FLEX_52_MASK; case 3: return I40E_FLEX_53_MASK; case 4: return I40E_FLEX_54_MASK; case 5: return I40E_FLEX_55_MASK; case 6: return I40E_FLEX_56_MASK; case 7: return I40E_FLEX_57_MASK; default: return 0; } } /** * i40e_print_input_set - Show changes between two input sets * @vsi: the vsi being configured * @old: the old input set * @new: the new input set * * Print the difference between old and new input sets by showing which series * of words are toggled on or off. Only displays the bits we actually support * changing.
**/ static void i40e_print_input_set(struct i40e_vsi *vsi, u64 old, u64 new) { struct i40e_pf *pf = vsi->back; bool old_value, new_value; int i; old_value = !!(old & I40E_L3_SRC_MASK); new_value = !!(new & I40E_L3_SRC_MASK); if (old_value != new_value) netif_info(pf, drv, vsi->netdev, "L3 source address: %s -> %s\n", old_value ? "ON" : "OFF", new_value ? "ON" : "OFF"); old_value = !!(old & I40E_L3_DST_MASK); new_value = !!(new & I40E_L3_DST_MASK); if (old_value != new_value) netif_info(pf, drv, vsi->netdev, "L3 destination address: %s -> %s\n", old_value ? "ON" : "OFF", new_value ? "ON" : "OFF"); old_value = !!(old & I40E_L4_SRC_MASK); new_value = !!(new & I40E_L4_SRC_MASK); if (old_value != new_value) netif_info(pf, drv, vsi->netdev, "L4 source port: %s -> %s\n", old_value ? "ON" : "OFF", new_value ? "ON" : "OFF"); old_value = !!(old & I40E_L4_DST_MASK); new_value = !!(new & I40E_L4_DST_MASK); if (old_value != new_value) netif_info(pf, drv, vsi->netdev, "L4 destination port: %s -> %s\n", old_value ? "ON" : "OFF", new_value ? "ON" : "OFF"); old_value = !!(old & I40E_VERIFY_TAG_MASK); new_value = !!(new & I40E_VERIFY_TAG_MASK); if (old_value != new_value) netif_info(pf, drv, vsi->netdev, "SCTP verification tag: %s -> %s\n", old_value ? "ON" : "OFF", new_value ? "ON" : "OFF"); /* Show change of flexible filter entries */ for (i = 0; i < I40E_FLEX_INDEX_ENTRIES; i++) { u64 flex_mask = i40e_pit_index_to_mask(i); old_value = !!(old & flex_mask); new_value = !!(new & flex_mask); if (old_value != new_value) netif_info(pf, drv, vsi->netdev, "FLEX index %d: %s -> %s\n", i, old_value ? "ON" : "OFF", new_value ? "ON" : "OFF"); } netif_info(pf, drv, vsi->netdev, " Current input set: %0llx\n", old); netif_info(pf, drv, vsi->netdev, "Requested input set: %0llx\n", new); } /** * i40e_check_fdir_input_set - Check that a given rx_flow_spec mask is valid * @vsi: pointer to the targeted VSI * @fsp: pointer to Rx flow specification * @userdef: userdefined data from flow specification * * Ensures that a given ethtool_rx_flow_spec has a valid mask. Some support * for partial matches exists with a few limitations. First, hardware only * supports masking by word boundary (2 bytes) and not per individual bit. * Second, hardware is limited to using one mask for a flow type and cannot * use a separate mask for each filter. * * To support these limitations, if we already have a configured filter for * the specified type, this function enforces that new filters of the type * match the configured input set. Otherwise, if we do not have a filter of * the specified type, we allow the input set to be updated to match the * desired filter. * * To help ensure that administrators understand why filters weren't displayed * as supported, we print a diagnostic message displaying how the input set * would change and warning to delete the preexisting filters if required. * * Returns 0 on successful input set match, and a negative return code on * failure. 
**/ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi, struct ethtool_rx_flow_spec *fsp, struct i40e_rx_flow_userdef *userdef) { static const __be32 ipv6_full_mask[4] = {cpu_to_be32(0xffffffff), cpu_to_be32(0xffffffff), cpu_to_be32(0xffffffff), cpu_to_be32(0xffffffff)}; struct ethtool_tcpip6_spec *tcp_ip6_spec; struct ethtool_usrip6_spec *usr_ip6_spec; struct ethtool_tcpip4_spec *tcp_ip4_spec; struct ethtool_usrip4_spec *usr_ip4_spec; struct i40e_pf *pf = vsi->back; u64 current_mask, new_mask; bool new_flex_offset = false; bool flex_l3 = false; u16 *fdir_filter_count; u16 index, src_offset = 0; u8 pit_index = 0; int err; switch (fsp->flow_type & ~FLOW_EXT) { case SCTP_V4_FLOW: index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP; fdir_filter_count = &pf->fd_sctp4_filter_cnt; break; case TCP_V4_FLOW: index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; fdir_filter_count = &pf->fd_tcp4_filter_cnt; break; case UDP_V4_FLOW: index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; fdir_filter_count = &pf->fd_udp4_filter_cnt; break; case SCTP_V6_FLOW: index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP; fdir_filter_count = &pf->fd_sctp6_filter_cnt; break; case TCP_V6_FLOW: index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; fdir_filter_count = &pf->fd_tcp6_filter_cnt; break; case UDP_V6_FLOW: index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; fdir_filter_count = &pf->fd_udp6_filter_cnt; break; case IP_USER_FLOW: index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; fdir_filter_count = &pf->fd_ip4_filter_cnt; flex_l3 = true; break; case IPV6_USER_FLOW: index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER; fdir_filter_count = &pf->fd_ip6_filter_cnt; flex_l3 = true; break; default: return -EOPNOTSUPP; } /* Read the current input set from register memory. */ current_mask = i40e_read_fd_input_set(pf, index); new_mask = current_mask; /* Determine, if any, the required changes to the input set in order * to support the provided mask. * * Hardware only supports masking at word (2 byte) granularity and does * not support full bitwise masking. This implementation simplifies * even further and only supports fully enabled or fully disabled * masks for each field, even though we could split the ip4src and * ip4dst fields. */ switch (fsp->flow_type & ~FLOW_EXT) { case SCTP_V4_FLOW: new_mask &= ~I40E_VERIFY_TAG_MASK; fallthrough; case TCP_V4_FLOW: case UDP_V4_FLOW: tcp_ip4_spec = &fsp->m_u.tcp_ip4_spec; /* IPv4 source address */ if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF)) new_mask |= I40E_L3_SRC_MASK; else if (!tcp_ip4_spec->ip4src) new_mask &= ~I40E_L3_SRC_MASK; else return -EOPNOTSUPP; /* IPv4 destination address */ if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF)) new_mask |= I40E_L3_DST_MASK; else if (!tcp_ip4_spec->ip4dst) new_mask &= ~I40E_L3_DST_MASK; else return -EOPNOTSUPP; /* L4 source port */ if (tcp_ip4_spec->psrc == htons(0xFFFF)) new_mask |= I40E_L4_SRC_MASK; else if (!tcp_ip4_spec->psrc) new_mask &= ~I40E_L4_SRC_MASK; else return -EOPNOTSUPP; /* L4 destination port */ if (tcp_ip4_spec->pdst == htons(0xFFFF)) new_mask |= I40E_L4_DST_MASK; else if (!tcp_ip4_spec->pdst) new_mask &= ~I40E_L4_DST_MASK; else return -EOPNOTSUPP; /* Filtering on Type of Service is not supported. */ if (tcp_ip4_spec->tos) return -EOPNOTSUPP; break; case SCTP_V6_FLOW: new_mask &= ~I40E_VERIFY_TAG_MASK; fallthrough; case TCP_V6_FLOW: case UDP_V6_FLOW: tcp_ip6_spec = &fsp->m_u.tcp_ip6_spec; /* Check if user provided IPv6 source address. 
*/ if (ipv6_addr_equal((struct in6_addr *)&tcp_ip6_spec->ip6src, (struct in6_addr *)&ipv6_full_mask)) new_mask |= I40E_L3_V6_SRC_MASK; else if (ipv6_addr_any((struct in6_addr *) &tcp_ip6_spec->ip6src)) new_mask &= ~I40E_L3_V6_SRC_MASK; else return -EOPNOTSUPP; /* Check if user provided destination address. */ if (ipv6_addr_equal((struct in6_addr *)&tcp_ip6_spec->ip6dst, (struct in6_addr *)&ipv6_full_mask)) new_mask |= I40E_L3_V6_DST_MASK; else if (ipv6_addr_any((struct in6_addr *) &tcp_ip6_spec->ip6dst)) new_mask &= ~I40E_L3_V6_DST_MASK; else return -EOPNOTSUPP; /* L4 source port */ if (tcp_ip6_spec->psrc == htons(0xFFFF)) new_mask |= I40E_L4_SRC_MASK; else if (!tcp_ip6_spec->psrc) new_mask &= ~I40E_L4_SRC_MASK; else return -EOPNOTSUPP; /* L4 destination port */ if (tcp_ip6_spec->pdst == htons(0xFFFF)) new_mask |= I40E_L4_DST_MASK; else if (!tcp_ip6_spec->pdst) new_mask &= ~I40E_L4_DST_MASK; else return -EOPNOTSUPP; /* Filtering on Traffic Classes is not supported. */ if (tcp_ip6_spec->tclass) return -EOPNOTSUPP; break; case IP_USER_FLOW: usr_ip4_spec = &fsp->m_u.usr_ip4_spec; /* IPv4 source address */ if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF)) new_mask |= I40E_L3_SRC_MASK; else if (!usr_ip4_spec->ip4src) new_mask &= ~I40E_L3_SRC_MASK; else return -EOPNOTSUPP; /* IPv4 destination address */ if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF)) new_mask |= I40E_L3_DST_MASK; else if (!usr_ip4_spec->ip4dst) new_mask &= ~I40E_L3_DST_MASK; else return -EOPNOTSUPP; /* First 4 bytes of L4 header */ if (usr_ip4_spec->l4_4_bytes) return -EOPNOTSUPP; /* Filtering on Type of Service is not supported. */ if (usr_ip4_spec->tos) return -EOPNOTSUPP; /* Filtering on IP version is not supported */ if (usr_ip4_spec->ip_ver) return -EINVAL; /* Filtering on L4 protocol is not supported */ if (usr_ip4_spec->proto) return -EINVAL; break; case IPV6_USER_FLOW: usr_ip6_spec = &fsp->m_u.usr_ip6_spec; /* Check if user provided IPv6 source address. */ if (ipv6_addr_equal((struct in6_addr *)&usr_ip6_spec->ip6src, (struct in6_addr *)&ipv6_full_mask)) new_mask |= I40E_L3_V6_SRC_MASK; else if (ipv6_addr_any((struct in6_addr *) &usr_ip6_spec->ip6src)) new_mask &= ~I40E_L3_V6_SRC_MASK; else return -EOPNOTSUPP; /* Check if user provided destination address. */ if (ipv6_addr_equal((struct in6_addr *)&usr_ip6_spec->ip6dst, (struct in6_addr *)&ipv6_full_mask)) new_mask |= I40E_L3_V6_DST_MASK; else if (ipv6_addr_any((struct in6_addr *) &usr_ip6_spec->ip6dst)) new_mask &= ~I40E_L3_V6_DST_MASK; else return -EOPNOTSUPP; if (usr_ip6_spec->l4_4_bytes) return -EOPNOTSUPP; /* Filtering on Traffic class is not supported. */ if (usr_ip6_spec->tclass) return -EOPNOTSUPP; /* Filtering on L4 protocol is not supported */ if (usr_ip6_spec->l4_proto) return -EINVAL; break; default: return -EOPNOTSUPP; } if (fsp->flow_type & FLOW_EXT) { /* Allow only 802.1Q and no etype defined, as * later it's modified to 0x8100 */ if (fsp->h_ext.vlan_etype != htons(ETH_P_8021Q) && fsp->h_ext.vlan_etype != 0) return -EOPNOTSUPP; if (fsp->m_ext.vlan_tci == htons(0xFFFF)) new_mask |= I40E_VLAN_SRC_MASK; else new_mask &= ~I40E_VLAN_SRC_MASK; } /* First, clear all flexible filter entries */ new_mask &= ~I40E_FLEX_INPUT_MASK; /* If we have a flexible filter, try to add this offset to the correct * flexible filter PIT list. Once finished, we can update the mask. * If the src_offset changed, we will get a new mask value which will * trigger an input set change. 
*/ if (userdef->flex_filter) { struct i40e_flex_pit *l3_flex_pit = NULL, *flex_pit = NULL; /* Flexible offset must be even, since the flexible payload * must be aligned on 2-byte boundary. */ if (userdef->flex_offset & 0x1) { dev_warn(&pf->pdev->dev, "Flexible data offset must be 2-byte aligned\n"); return -EINVAL; } src_offset = userdef->flex_offset >> 1; /* FLX_PIT source offset value is only so large */ if (src_offset > I40E_MAX_FLEX_SRC_OFFSET) { dev_warn(&pf->pdev->dev, "Flexible data must reside within first 64 bytes of the packet payload\n"); return -EINVAL; } /* See if this offset has already been programmed. If we get * an ERR_PTR, then the filter is not safe to add. Otherwise, * if we get a NULL pointer, this means we will need to add * the offset. */ flex_pit = i40e_find_flex_offset(&pf->l4_flex_pit_list, src_offset); if (IS_ERR(flex_pit)) return PTR_ERR(flex_pit); /* IP_USER_FLOW filters match both L4 (ICMP) and L3 (unknown) * packet types, and thus we need to program both L3 and L4 * flexible values. These must have identical flexible index, * as otherwise we can't correctly program the input set. So * we'll find both an L3 and L4 index and make sure they are * the same. */ if (flex_l3) { l3_flex_pit = i40e_find_flex_offset(&pf->l3_flex_pit_list, src_offset); if (IS_ERR(l3_flex_pit)) return PTR_ERR(l3_flex_pit); if (flex_pit) { /* If we already had a matching L4 entry, we * need to make sure that the L3 entry we * obtained uses the same index. */ if (l3_flex_pit) { if (l3_flex_pit->pit_index != flex_pit->pit_index) { return -EINVAL; } } else { new_flex_offset = true; } } else { flex_pit = l3_flex_pit; } } /* If we didn't find an existing flex offset, we need to * program a new one. However, we don't immediately program it * here because we will wait to program until after we check * that it is safe to change the input set. */ if (!flex_pit) { new_flex_offset = true; pit_index = i40e_unused_pit_index(pf); } else { pit_index = flex_pit->pit_index; } /* Update the mask with the new offset */ new_mask |= i40e_pit_index_to_mask(pit_index); } /* If the mask and flexible filter offsets for this filter match the * currently programmed values we don't need any input set change, so * this filter is safe to install. */ if (new_mask == current_mask && !new_flex_offset) return 0; netif_info(pf, drv, vsi->netdev, "Input set change requested for %s flows:\n", i40e_flow_str(fsp)); i40e_print_input_set(vsi, current_mask, new_mask); if (new_flex_offset) { netif_info(pf, drv, vsi->netdev, "FLEX index %d: Offset -> %d", pit_index, src_offset); } /* Hardware input sets are global across multiple ports, so even the * main port cannot change them when in MFP mode as this would impact * any filters on the other ports. */ if (pf->flags & I40E_FLAG_MFP_ENABLED) { netif_err(pf, drv, vsi->netdev, "Cannot change Flow Director input sets while MFP is enabled\n"); return -EOPNOTSUPP; } /* This filter requires us to update the input set. However, hardware * only supports one input set per flow type, and does not support * separate masks for each filter. This means that we can only support * a single mask for all filters of a specific type. * * If we have preexisting filters, they obviously depend on the * current programmed input set. Display a diagnostic message in this * case explaining why the filter could not be accepted. 
*/ if (*fdir_filter_count) { netif_err(pf, drv, vsi->netdev, "Cannot change input set for %s flows until %d preexisting filters are removed\n", i40e_flow_str(fsp), *fdir_filter_count); return -EOPNOTSUPP; } i40e_write_fd_input_set(pf, index, new_mask); /* IP_USER_FLOW filters match both IPv4/Other and IPv4/Fragmented * frames. If we're programming the input set for IPv4/Other, we also * need to program the IPv4/Fragmented input set. Since we don't have * separate support, we'll always assume and enforce that the two flow * types must have matching input sets. */ if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4, new_mask); /* Add the new offset and update table, if necessary */ if (new_flex_offset) { err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset, pit_index); if (err) return err; if (flex_l3) { err = i40e_add_flex_offset(&pf->l3_flex_pit_list, src_offset, pit_index); if (err) return err; } i40e_reprogram_flex_pit(pf); } return 0; } /** * i40e_match_fdir_filter - Return true if two filters match * @a: pointer to filter struct * @b: pointer to filter struct * * Returns true if the two filters match exactly the same criteria. I.e. they * match the same flow type and have the same parameters. We don't need to * check any input-set since all filters of the same flow type must use the * same input set. **/ static bool i40e_match_fdir_filter(struct i40e_fdir_filter *a, struct i40e_fdir_filter *b) { /* The filters do not match if any of these criteria differ. */ if (a->dst_ip != b->dst_ip || a->src_ip != b->src_ip || a->dst_port != b->dst_port || a->src_port != b->src_port || a->flow_type != b->flow_type || a->ipl4_proto != b->ipl4_proto || a->vlan_tag != b->vlan_tag || a->vlan_etype != b->vlan_etype) return false; return true; } /** * i40e_disallow_matching_filters - Check that new filters differ * @vsi: pointer to the targeted VSI * @input: new filter to check * * Due to hardware limitations, it is not possible for two filters that match * similar criteria to be programmed at the same time. This is true for a few * reasons: * * (a) all filters matching a particular flow type must use the same input * set, that is they must match the same criteria. * (b) different flow types will never match the same packet, as the flow type * is decided by hardware before checking which rules apply. * (c) hardware has no way to distinguish which order filters apply in. * * Due to this, we can't really support using the location data to order * filters in the hardware parsing. It is technically possible for the user to * request two filters matching the same criteria but which select different * queues. In this case, rather than keep both filters in the list, we reject * the 2nd filter when the user requests adding it. * * This avoids needing to track location for programming the filter to * hardware, and ensures that we avoid some strange scenarios involving * deleting filters which match the same criteria. **/ static int i40e_disallow_matching_filters(struct i40e_vsi *vsi, struct i40e_fdir_filter *input) { struct i40e_pf *pf = vsi->back; struct i40e_fdir_filter *rule; struct hlist_node *node2; /* Loop through every filter, and check that it doesn't match */ hlist_for_each_entry_safe(rule, node2, &pf->fdir_filter_list, fdir_node) { /* Don't check the filters match if they share the same fd_id, * since the new filter is actually just updating the target * of the old filter.
*/ if (rule->fd_id == input->fd_id) continue; /* If any filters match, then print a warning message to the * kernel message buffer and bail out. */ if (i40e_match_fdir_filter(rule, input)) { dev_warn(&pf->pdev->dev, "Existing user defined filter %d already matches this flow.\n", rule->fd_id); return -EINVAL; } } return 0; } /** * i40e_add_fdir_ethtool - Add/Remove Flow Director filters * @vsi: pointer to the targeted VSI * @cmd: command to get or set RX flow classification rules * * Add Flow Director filters for a specific flow spec based on their * protocol. Returns 0 if the filters were successfully added. **/ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, struct ethtool_rxnfc *cmd) { struct i40e_rx_flow_userdef userdef; struct ethtool_rx_flow_spec *fsp; struct i40e_fdir_filter *input; u16 dest_vsi = 0, q_index = 0; struct i40e_pf *pf; int ret = -EINVAL; u8 dest_ctl; if (!vsi) return -EINVAL; pf = vsi->back; if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) return -EOPNOTSUPP; if (test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) return -ENOSPC; if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) return -EBUSY; if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) return -EBUSY; fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; /* Parse the user-defined field */ if (i40e_parse_rx_flow_user_data(fsp, &userdef)) return -EINVAL; /* Extended MAC field is not supported */ if (fsp->flow_type & FLOW_MAC_EXT) return -EINVAL; ret = i40e_check_fdir_input_set(vsi, fsp, &userdef); if (ret) return ret; if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort + pf->hw.func_caps.fd_filters_guaranteed)) { return -EINVAL; } /* ring_cookie is either the drop index, or is a mask of the queue * index and VF id we wish to target. */ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; } else { u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); if (!vf) { if (ring >= vsi->num_queue_pairs) return -EINVAL; dest_vsi = vsi->id; } else { /* VFs are zero-indexed, so we subtract one here */ vf--; if (vf >= pf->num_alloc_vfs) return -EINVAL; if (ring >= pf->vf[vf].num_queue_pairs) return -EINVAL; dest_vsi = pf->vf[vf].lan_vsi_id; } dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX; q_index = ring; } input = kzalloc(sizeof(*input), GFP_KERNEL); if (!input) return -ENOMEM; input->fd_id = fsp->location; input->q_index = q_index; input->dest_vsi = dest_vsi; input->dest_ctl = dest_ctl; input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID; input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src; input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst; input->flow_type = fsp->flow_type & ~FLOW_EXT; input->vlan_etype = fsp->h_ext.vlan_etype; if (!fsp->m_ext.vlan_etype && fsp->h_ext.vlan_tci) input->vlan_etype = cpu_to_be16(ETH_P_8021Q); if (fsp->m_ext.vlan_tci && input->vlan_etype) input->vlan_tag = fsp->h_ext.vlan_tci; if (input->flow_type == IPV6_USER_FLOW || input->flow_type == UDP_V6_FLOW || input->flow_type == TCP_V6_FLOW || input->flow_type == SCTP_V6_FLOW) { /* Reverse the src and dest notion, since the HW expects them * to be from Tx perspective where as the input from user is * from Rx filter view. 
*/ input->ipl4_proto = fsp->h_u.usr_ip6_spec.l4_proto; input->dst_port = fsp->h_u.tcp_ip6_spec.psrc; input->src_port = fsp->h_u.tcp_ip6_spec.pdst; memcpy(input->dst_ip6, fsp->h_u.ah_ip6_spec.ip6src, sizeof(__be32) * 4); memcpy(input->src_ip6, fsp->h_u.ah_ip6_spec.ip6dst, sizeof(__be32) * 4); } else { /* Reverse the src and dest notion, since the HW expects them * to be from Tx perspective where as the input from user is * from Rx filter view. */ input->ipl4_proto = fsp->h_u.usr_ip4_spec.proto; input->dst_port = fsp->h_u.tcp_ip4_spec.psrc; input->src_port = fsp->h_u.tcp_ip4_spec.pdst; input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src; input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst; } if (userdef.flex_filter) { input->flex_filter = true; input->flex_word = cpu_to_be16(userdef.flex_word); input->flex_offset = userdef.flex_offset; } /* Avoid programming two filters with identical match criteria. */ ret = i40e_disallow_matching_filters(vsi, input); if (ret) goto free_filter_memory; /* Add the input filter to the fdir_input_list, possibly replacing * a previous filter. Do not free the input structure after adding it * to the list as this would cause a use-after-free bug. */ i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL); ret = i40e_add_del_fdir(vsi, input, true); if (ret) goto remove_sw_rule; return 0; remove_sw_rule: hlist_del(&input->fdir_node); pf->fdir_pf_active_filters--; free_filter_memory: kfree(input); return ret; } /** * i40e_set_rxnfc - command to set RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command * * Returns Success if the command is supported. **/ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; int ret = -EOPNOTSUPP; switch (cmd->cmd) { case ETHTOOL_SRXFH: ret = i40e_set_rss_hash_opt(pf, cmd); break; case ETHTOOL_SRXCLSRLINS: ret = i40e_add_fdir_ethtool(vsi, cmd); break; case ETHTOOL_SRXCLSRLDEL: ret = i40e_del_fdir_entry(vsi, cmd); break; default: break; } return ret; } /** * i40e_max_channels - get Max number of combined channels supported * @vsi: vsi pointer **/ static unsigned int i40e_max_channels(struct i40e_vsi *vsi) { /* TODO: This code assumes DCB and FD is disabled for now. */ return vsi->alloc_queue_pairs; } /** * i40e_get_channels - Get the current channels enabled and max supported etc. * @dev: network interface device structure * @ch: ethtool channels structure * * We don't support separate tx and rx queues as channels. The other count * represents how many queues are being used for control. max_combined counts * how many queue pairs we can support. They may not be mapped 1 to 1 with * q_vectors since we support a lot more queue pairs than q_vectors. **/ static void i40e_get_channels(struct net_device *dev, struct ethtool_channels *ch) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; /* report maximum channels */ ch->max_combined = i40e_max_channels(vsi); /* report info for other vector */ ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0; ch->max_other = ch->other_count; /* Note: This code assumes DCB is disabled for now. */ ch->combined_count = vsi->num_queue_pairs; } /** * i40e_set_channels - Set the new channels count. 
* @dev: network interface device structure * @ch: ethtool channels structure * * The new channels count may not be the same as requested by the user * since it gets rounded down to a power of 2 value. **/ static int i40e_set_channels(struct net_device *dev, struct ethtool_channels *ch) { const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; struct i40e_netdev_priv *np = netdev_priv(dev); unsigned int count = ch->combined_count; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_fdir_filter *rule; struct hlist_node *node2; int new_count; int err = 0; /* We do not support setting channels for any other VSI at present */ if (vsi->type != I40E_VSI_MAIN) return -EINVAL; /* We do not support setting channels via ethtool when TCs are * configured through mqprio */ if (i40e_is_tc_mqprio_enabled(pf)) return -EINVAL; /* verify they are not requesting separate vectors */ if (!count || ch->rx_count || ch->tx_count) return -EINVAL; /* verify other_count has not changed */ if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0)) return -EINVAL; /* verify the number of channels does not exceed hardware limits */ if (count > i40e_max_channels(vsi)) return -EINVAL; /* verify that the number of channels does not invalidate any current * flow director rules */ hlist_for_each_entry_safe(rule, node2, &pf->fdir_filter_list, fdir_node) { if (rule->dest_ctl != drop && count <= rule->q_index) { dev_warn(&pf->pdev->dev, "Existing user defined filter %d assigns flow to queue %d\n", rule->fd_id, rule->q_index); err = -EINVAL; } } if (err) { dev_err(&pf->pdev->dev, "Existing filter rules must be deleted to reduce combined channel count to %d\n", count); return err; } /* update feature limits from largest to smallest supported values */ /* TODO: Flow director limit, DCB etc */ /* use rss_reconfig to rebuild with new queue count and update traffic * class queue mapping */ new_count = i40e_reconfig_rss_queues(pf, count); if (new_count > 0) return 0; else return -EINVAL; } /** * i40e_get_rxfh_key_size - get the RSS hash key size * @netdev: network interface device structure * * Returns the table size. **/ static u32 i40e_get_rxfh_key_size(struct net_device *netdev) { return I40E_HKEY_ARRAY_SIZE; } /** * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size * @netdev: network interface device structure * * Returns the table size. **/ static u32 i40e_get_rxfh_indir_size(struct net_device *netdev) { return I40E_HLUT_ARRAY_SIZE; } /** * i40e_get_rxfh - get the rx flow hash indirection table * @netdev: network interface device structure * @indir: indirection table * @key: hash key * @hfunc: hash function * * Reads the indirection table directly from the hardware. Returns 0 on * success. 
**/ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; u8 *lut, *seed = NULL; int ret; u16 i; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; if (!indir) return 0; seed = key; lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL); if (!lut) return -ENOMEM; ret = i40e_get_rss(vsi, seed, lut, I40E_HLUT_ARRAY_SIZE); if (ret) goto out; for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) indir[i] = (u32)(lut[i]); out: kfree(lut); return ret; } /** * i40e_set_rxfh - set the rx flow hash indirection table * @netdev: network interface device structure * @indir: indirection table * @key: hash key * @hfunc: hash function to use * * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. **/ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u8 *seed = NULL; u16 i; if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; if (key) { if (!vsi->rss_hkey_user) { vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE, GFP_KERNEL); if (!vsi->rss_hkey_user) return -ENOMEM; } memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE); seed = vsi->rss_hkey_user; } if (!vsi->rss_lut_user) { vsi->rss_lut_user = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL); if (!vsi->rss_lut_user) return -ENOMEM; } /* Each 32 bits pointed by 'indir' is stored with a lut entry */ if (indir) for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) vsi->rss_lut_user[i] = (u8)(indir[i]); else i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE, vsi->rss_size); return i40e_config_rss(vsi, seed, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE); } /** * i40e_get_priv_flags - report device private flags * @dev: network interface device structure * * The get string set count and the string set should be matched for each * flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags * array. * * Returns a u32 bitmap of flags. 
**/ static u32 i40e_get_priv_flags(struct net_device *dev) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u32 i, j, ret_flags = 0; for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { const struct i40e_priv_flags *priv_flags; priv_flags = &i40e_gstrings_priv_flags[i]; if (priv_flags->flag & pf->flags) ret_flags |= BIT(i); } if (pf->hw.pf_id != 0) return ret_flags; for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) { const struct i40e_priv_flags *priv_flags; priv_flags = &i40e_gl_gstrings_priv_flags[j]; if (priv_flags->flag & pf->flags) ret_flags |= BIT(i + j); } return ret_flags; } /** * i40e_set_priv_flags - set private flags * @dev: network interface device structure * @flags: bit flags to be set **/ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) { struct i40e_netdev_priv *np = netdev_priv(dev); u64 orig_flags, new_flags, changed_flags; enum i40e_admin_queue_err adq_err; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u32 reset_needed = 0; int status; u32 i, j; orig_flags = READ_ONCE(pf->flags); new_flags = orig_flags; for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { const struct i40e_priv_flags *priv_flags; priv_flags = &i40e_gstrings_priv_flags[i]; if (flags & BIT(i)) new_flags |= priv_flags->flag; else new_flags &= ~(priv_flags->flag); /* If this is a read-only flag, it can't be changed */ if (priv_flags->read_only && ((orig_flags ^ new_flags) & ~BIT(i))) return -EOPNOTSUPP; } if (pf->hw.pf_id != 0) goto flags_complete; for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) { const struct i40e_priv_flags *priv_flags; priv_flags = &i40e_gl_gstrings_priv_flags[j]; if (flags & BIT(i + j)) new_flags |= priv_flags->flag; else new_flags &= ~(priv_flags->flag); /* If this is a read-only flag, it can't be changed */ if (priv_flags->read_only && ((orig_flags ^ new_flags) & ~BIT(i))) return -EOPNOTSUPP; } flags_complete: changed_flags = orig_flags ^ new_flags; if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG; if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED | I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED)) reset_needed = BIT(__I40E_PF_RESET_REQUESTED); /* Before we finalize any flag changes, we need to perform some * checks to ensure that the changes are supported and safe. */ /* ATR eviction is not supported on all devices */ if ((new_flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) && !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)) return -EOPNOTSUPP; /* If the driver detected FW LLDP was disabled on init, this flag could * be set, however we do not support _changing_ the flag: * - on XL710 if NPAR is enabled or FW API version < 1.7 * - on X722 with FW API version < 1.6 * There are situations where older FW versions/NPAR enabled PFs could * disable LLDP, however we _must_ not allow the user to enable/disable * LLDP with this flag on unsupported FW versions. 
*/ if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) { if (!(pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) { dev_warn(&pf->pdev->dev, "Device does not support changing FW LLDP\n"); return -EOPNOTSUPP; } } if (changed_flags & I40E_FLAG_RS_FEC && pf->hw.device_id != I40E_DEV_ID_25G_SFP28 && pf->hw.device_id != I40E_DEV_ID_25G_B) { dev_warn(&pf->pdev->dev, "Device does not support changing FEC configuration\n"); return -EOPNOTSUPP; } if (changed_flags & I40E_FLAG_BASE_R_FEC && pf->hw.device_id != I40E_DEV_ID_25G_SFP28 && pf->hw.device_id != I40E_DEV_ID_25G_B && pf->hw.device_id != I40E_DEV_ID_KX_X722) { dev_warn(&pf->pdev->dev, "Device does not support changing FEC configuration\n"); return -EOPNOTSUPP; } /* Process any additional changes needed as a result of flag changes. * The changed_flags value reflects the list of bits that were * changed in the code above. */ /* Flush current ATR settings if ATR was disabled */ if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) && !(new_flags & I40E_FLAG_FD_ATR_ENABLED)) { set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); } if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { u16 sw_flags = 0, valid_flags = 0; int ret; if (!(new_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags, 0, NULL); if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { dev_info(&pf->pdev->dev, "couldn't set switch config bits, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* not a fatal problem, just keep going */ } } if ((changed_flags & I40E_FLAG_RS_FEC) || (changed_flags & I40E_FLAG_BASE_R_FEC)) { u8 fec_cfg = 0; if (new_flags & I40E_FLAG_RS_FEC && new_flags & I40E_FLAG_BASE_R_FEC) { fec_cfg = I40E_AQ_SET_FEC_AUTO; } else if (new_flags & I40E_FLAG_RS_FEC) { fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS | I40E_AQ_SET_FEC_ABILITY_RS); } else if (new_flags & I40E_FLAG_BASE_R_FEC) { fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR | I40E_AQ_SET_FEC_ABILITY_KR); } if (i40e_set_fec_cfg(dev, fec_cfg)) dev_warn(&pf->pdev->dev, "Cannot change FEC config\n"); } if ((changed_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) && (orig_flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) { dev_err(&pf->pdev->dev, "Setting link-down-on-close not supported on this port (because total-port-shutdown is enabled)\n"); return -EOPNOTSUPP; } if ((changed_flags & I40E_FLAG_VF_VLAN_PRUNING) && pf->num_alloc_vfs) { dev_warn(&pf->pdev->dev, "Changing vf-vlan-pruning flag while VF(s) are active is not supported\n"); return -EOPNOTSUPP; } if ((changed_flags & I40E_FLAG_LEGACY_RX) && I40E_2K_TOO_SMALL_WITH_PADDING) { dev_warn(&pf->pdev->dev, "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n"); return -EOPNOTSUPP; } if ((changed_flags & new_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) && (new_flags & I40E_FLAG_MFP_ENABLED)) dev_warn(&pf->pdev->dev, "Turning on link-down-on-close flag may affect other partitions\n"); if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) { if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) { #ifdef CONFIG_I40E_DCB i40e_dcb_sw_default_config(pf); #endif /* CONFIG_I40E_DCB */ i40e_aq_cfg_lldp_mib_change_event(&pf->hw, false, NULL); i40e_aq_stop_lldp(&pf->hw, true, false, NULL); } else { status = i40e_aq_start_lldp(&pf->hw, false, NULL); if (status) { adq_err = pf->hw.aq.asq_last_status; switch (adq_err) { case I40E_AQ_RC_EEXIST: dev_warn(&pf->pdev->dev, "FW LLDP agent is 
already running\n"); reset_needed = 0; break; case I40E_AQ_RC_EPERM: dev_warn(&pf->pdev->dev, "Device configuration forbids SW from starting the LLDP agent.\n"); return -EINVAL; case I40E_AQ_RC_EAGAIN: dev_warn(&pf->pdev->dev, "Stop FW LLDP agent command is still being processed, please try again in a second.\n"); return -EBUSY; default: dev_warn(&pf->pdev->dev, "Starting FW LLDP agent failed: error: %pe, %s\n", ERR_PTR(status), i40e_aq_str(&pf->hw, adq_err)); return -EINVAL; } } } } /* Now that we've checked to ensure that the new flags are valid, load * them into place. Since we only modify flags either (a) during * initialization or (b) while holding the RTNL lock, we don't need * anything fancy here. */ pf->flags = new_flags; /* Issue reset to cause things to take effect, as additional bits * are added we will need to create a mask of bits requiring reset */ if (reset_needed) i40e_do_reset(pf, reset_needed, true); return 0; } /** * i40e_get_module_info - get (Q)SFP+ module type info * @netdev: network interface device structure * @modinfo: module EEPROM size and layout information structure **/ static int i40e_get_module_info(struct net_device *netdev, struct ethtool_modinfo *modinfo) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u32 sff8472_comp = 0; u32 sff8472_swap = 0; u32 sff8636_rev = 0; u32 type = 0; int status; /* Check if firmware supports reading module EEPROM. */ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) { netdev_err(vsi->netdev, "Module EEPROM memory read not supported. Please update the NVM image.\n"); return -EINVAL; } status = i40e_update_link_info(hw); if (status) return -EIO; if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) { netdev_err(vsi->netdev, "Cannot read module EEPROM memory. No module connected.\n"); return -EINVAL; } type = hw->phy.link_info.module_type[0]; switch (type) { case I40E_MODULE_TYPE_SFP: status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, I40E_I2C_EEPROM_DEV_ADDR, true, I40E_MODULE_SFF_8472_COMP, &sff8472_comp, NULL); if (status) return -EIO; status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, I40E_I2C_EEPROM_DEV_ADDR, true, I40E_MODULE_SFF_8472_SWAP, &sff8472_swap, NULL); if (status) return -EIO; /* Check if the module requires address swap to access * the other EEPROM memory page. */ if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) { netdev_warn(vsi->netdev, "Module address swap to access page 0xA2 is not supported.\n"); modinfo->type = ETH_MODULE_SFF_8079; modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; } else if (sff8472_comp == 0x00) { /* Module is not SFF-8472 compliant */ modinfo->type = ETH_MODULE_SFF_8079; modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; } else if (!(sff8472_swap & I40E_MODULE_SFF_DDM_IMPLEMENTED)) { /* Module is SFF-8472 compliant but doesn't implement * Digital Diagnostic Monitoring (DDM). */ modinfo->type = ETH_MODULE_SFF_8079; modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; } else { modinfo->type = ETH_MODULE_SFF_8472; modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; } break; case I40E_MODULE_TYPE_QSFP_PLUS: /* Read from memory page 0. 
*/ status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, 0, true, I40E_MODULE_REVISION_ADDR, &sff8636_rev, NULL); if (status) return -EIO; /* Determine revision compliance byte */ if (sff8636_rev > 0x02) { /* Module is SFF-8636 compliant */ modinfo->type = ETH_MODULE_SFF_8636; modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; } else { modinfo->type = ETH_MODULE_SFF_8436; modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; } break; case I40E_MODULE_TYPE_QSFP28: modinfo->type = ETH_MODULE_SFF_8636; modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; break; default: netdev_err(vsi->netdev, "Module type unrecognized\n"); return -EINVAL; } return 0; } /** * i40e_get_module_eeprom - fills buffer with (Q)SFP+ module memory contents * @netdev: network interface device structure * @ee: EEPROM dump request structure * @data: buffer to be filled with EEPROM contents **/ static int i40e_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; bool is_sfp = false; u32 value = 0; int status; int i; if (!ee || !ee->len || !data) return -EINVAL; if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP) is_sfp = true; for (i = 0; i < ee->len; i++) { u32 offset = i + ee->offset; u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0; /* Check if we need to access the other memory page */ if (is_sfp) { if (offset >= ETH_MODULE_SFF_8079_LEN) { offset -= ETH_MODULE_SFF_8079_LEN; addr = I40E_I2C_EEPROM_DEV_ADDR2; } } else { while (offset >= ETH_MODULE_SFF_8436_LEN) { /* Compute memory page number and offset. */ offset -= ETH_MODULE_SFF_8436_LEN / 2; addr++; } } status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, addr, true, offset, &value, NULL); if (status) return -EIO; data[i] = value; } return 0; } static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_aq_get_phy_abilities_resp phy_cfg; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int status = 0; /* Get initial PHY capabilities */ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL); if (status) return -EAGAIN; /* Check whether NIC configuration is compatible with Energy Efficient * Ethernet (EEE) mode. */ if (phy_cfg.eee_capability == 0) return -EOPNOTSUPP; edata->supported = SUPPORTED_Autoneg; edata->lp_advertised = edata->supported; /* Get current configuration */ status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL); if (status) return -EAGAIN; edata->advertised = phy_cfg.eee_capability ? 
SUPPORTED_Autoneg : 0U; edata->eee_enabled = !!edata->advertised; edata->tx_lpi_enabled = pf->stats.tx_lpi_status; edata->eee_active = pf->stats.tx_lpi_status && pf->stats.rx_lpi_status; return 0; } static int i40e_is_eee_param_supported(struct net_device *netdev, struct ethtool_eee *edata) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_ethtool_not_used { u32 value; const char *name; } param[] = { {edata->advertised & ~SUPPORTED_Autoneg, "advertise"}, {edata->tx_lpi_timer, "tx-timer"}, {edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"} }; int i; for (i = 0; i < ARRAY_SIZE(param); i++) { if (param[i].value) { netdev_info(netdev, "EEE setting %s not supported\n", param[i].name); return -EOPNOTSUPP; } } return 0; } static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; __le16 eee_capability; int status = 0; /* Deny parameters we don't support */ if (i40e_is_eee_param_supported(netdev, edata)) return -EOPNOTSUPP; /* Get initial PHY capabilities */ status = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL); if (status) return -EAGAIN; /* Check whether NIC configuration is compatible with Energy Efficient * Ethernet (EEE) mode. */ if (abilities.eee_capability == 0) return -EOPNOTSUPP; /* Cache initial EEE capability */ eee_capability = abilities.eee_capability; /* Get current PHY configuration */ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); if (status) return -EAGAIN; /* Cache current PHY configuration */ config.phy_type = abilities.phy_type; config.phy_type_ext = abilities.phy_type_ext; config.link_speed = abilities.link_speed; config.abilities = abilities.abilities | I40E_AQ_PHY_ENABLE_ATOMIC_LINK; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; config.fec_config = abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_PHY_FEC_CONFIG_MASK; /* Set desired EEE state */ if (edata->eee_enabled) { config.eee_capability = eee_capability; config.eeer |= cpu_to_le32(I40E_PRTPM_EEER_TX_LPI_EN_MASK); } else { config.eee_capability = 0; config.eeer &= cpu_to_le32(~I40E_PRTPM_EEER_TX_LPI_EN_MASK); } /* Apply modified PHY configuration */ status = i40e_aq_set_phy_config(hw, &config, NULL); if (status) return -EAGAIN; return 0; } static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = { .get_drvinfo = i40e_get_drvinfo, .set_eeprom = i40e_set_eeprom, .get_eeprom_len = i40e_get_eeprom_len, .get_eeprom = i40e_get_eeprom, }; static const struct ethtool_ops i40e_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES_IRQ | ETHTOOL_COALESCE_USE_ADAPTIVE | ETHTOOL_COALESCE_RX_USECS_HIGH | ETHTOOL_COALESCE_TX_USECS_HIGH, .get_drvinfo = i40e_get_drvinfo, .get_regs_len = i40e_get_regs_len, .get_regs = i40e_get_regs, .nway_reset = i40e_nway_reset, .get_link = ethtool_op_get_link, .get_wol = i40e_get_wol, .set_wol = i40e_set_wol, .set_eeprom = i40e_set_eeprom, .get_eeprom_len = i40e_get_eeprom_len, .get_eeprom = i40e_get_eeprom, .get_ringparam = i40e_get_ringparam, .set_ringparam = i40e_set_ringparam, .get_pauseparam = i40e_get_pauseparam, .set_pauseparam = i40e_set_pauseparam, .get_msglevel = i40e_get_msglevel, .set_msglevel = i40e_set_msglevel, .get_rxnfc = 
i40e_get_rxnfc, .set_rxnfc = i40e_set_rxnfc, .self_test = i40e_diag_test, .get_strings = i40e_get_strings, .get_eee = i40e_get_eee, .set_eee = i40e_set_eee, .set_phys_id = i40e_set_phys_id, .get_sset_count = i40e_get_sset_count, .get_ethtool_stats = i40e_get_ethtool_stats, .get_coalesce = i40e_get_coalesce, .set_coalesce = i40e_set_coalesce, .get_rxfh_key_size = i40e_get_rxfh_key_size, .get_rxfh_indir_size = i40e_get_rxfh_indir_size, .get_rxfh = i40e_get_rxfh, .set_rxfh = i40e_set_rxfh, .get_channels = i40e_get_channels, .set_channels = i40e_set_channels, .get_module_info = i40e_get_module_info, .get_module_eeprom = i40e_get_module_eeprom, .get_ts_info = i40e_get_ts_info, .get_priv_flags = i40e_get_priv_flags, .set_priv_flags = i40e_set_priv_flags, .get_per_queue_coalesce = i40e_get_per_queue_coalesce, .set_per_queue_coalesce = i40e_set_per_queue_coalesce, .get_link_ksettings = i40e_get_link_ksettings, .set_link_ksettings = i40e_set_link_ksettings, .get_fecparam = i40e_get_fec_param, .set_fecparam = i40e_set_fec_param, .flash_device = i40e_ddp_flash, }; void i40e_set_ethtool_ops(struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) netdev->ethtool_ops = &i40e_ethtool_ops; else netdev->ethtool_ops = &i40e_ethtool_recovery_mode_ops; }
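/*
 * Editor's illustration (not part of i40e_ethtool.c): a minimal user-space
 * sketch of the ETHTOOL_SRXCLSRLINS request that i40e_add_fdir_ethtool()
 * above services. It installs a TCP/IPv4 Flow Director rule steering
 * traffic with destination port 5001 to Rx queue 3. The interface name,
 * port, queue and rule location are assumed example values, and error
 * handling is kept minimal. The all-ones port mask follows the
 * word-granularity convention enforced by i40e_check_fdir_input_set();
 * matching on the port alone may trigger the input set change path
 * described in that function's comments.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int example_add_fdir_tcp4_rule(const char *ifname)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd, err;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	/* Match only the L4 destination port; as in the driver code above,
	 * an all-ones mask means the field participates in the match and
	 * zero means don't care.
	 */
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(5001);
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xFFFF);
	nfc.fs.ring_cookie = 3;	/* deliver matching packets to queue 3 */
	nfc.fs.location = 10;	/* software rule id (becomes fd_id) */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	err = ioctl(fd, SIOCETHTOOL, &ifr);
	if (err)
		perror("ETHTOOL_SRXCLSRLINS");

	close(fd);
	return err;
}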
linux-master
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
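/*
 * Editor's illustration (not part of i40e_ethtool.c above or of the
 * i40e_adminq.c file that follows): a minimal user-space sketch of the
 * ETHTOOL_GRSSH/ETHTOOL_SRSSH pair handled by i40e_get_rxfh() and
 * i40e_set_rxfh() above. It queries the device's RSS table size (512
 * entries on the i40e PF, per I40E_HLUT_ARRAY_SIZE) and then spreads the
 * indirection table evenly over the first nqueues Rx queues, leaving the
 * hash key and hash function untouched. Interface name and queue count
 * are assumed example values.
 */
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int example_spread_rss_indir(const char *ifname, unsigned int nqueues)
{
	struct ethtool_rxfh head = { .cmd = ETHTOOL_GRSSH };
	struct ethtool_rxfh *rxfh;
	struct ifreq ifr;
	unsigned int i;
	int fd, err;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* First pass with zero sizes: the ethtool core fills in the table
	 * and key sizes reported by the driver's get_rxfh_indir_size() and
	 * get_rxfh_key_size() callbacks.
	 */
	ifr.ifr_data = (void *)&head;
	err = ioctl(fd, SIOCETHTOOL, &ifr);
	if (err || !head.indir_size || !nqueues)
		goto out_close;

	rxfh = calloc(1, sizeof(*rxfh) + head.indir_size * sizeof(__u32));
	if (!rxfh) {
		err = -1;
		goto out_close;
	}

	rxfh->cmd = ETHTOOL_SRSSH;
	rxfh->indir_size = head.indir_size;	/* must match the device size */
	rxfh->key_size = 0;			/* keep the current hash key */
	rxfh->hfunc = ETH_RSS_HASH_NO_CHANGE;	/* keep the current hash func */

	/* Round-robin the LUT entries across the requested queue count */
	for (i = 0; i < head.indir_size; i++)
		rxfh->rss_config[i] = i % nqueues;

	ifr.ifr_data = (void *)rxfh;
	err = ioctl(fd, SIOCETHTOOL, &ifr);

	free(rxfh);
out_close:
	close(fd);
	return err;
}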
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e_type.h" #include "i40e_register.h" #include "i40e_adminq.h" #include "i40e_prototype.h" static void i40e_resume_aq(struct i40e_hw *hw); /** * i40e_adminq_init_regs - Initialize AdminQ registers * @hw: pointer to the hardware structure * * This assumes the alloc_asq and alloc_arq functions have already been called **/ static void i40e_adminq_init_regs(struct i40e_hw *hw) { /* set head and tail registers in our local struct */ if (i40e_is_vf(hw)) { hw->aq.asq.tail = I40E_VF_ATQT1; hw->aq.asq.head = I40E_VF_ATQH1; hw->aq.asq.len = I40E_VF_ATQLEN1; hw->aq.asq.bal = I40E_VF_ATQBAL1; hw->aq.asq.bah = I40E_VF_ATQBAH1; hw->aq.arq.tail = I40E_VF_ARQT1; hw->aq.arq.head = I40E_VF_ARQH1; hw->aq.arq.len = I40E_VF_ARQLEN1; hw->aq.arq.bal = I40E_VF_ARQBAL1; hw->aq.arq.bah = I40E_VF_ARQBAH1; } else { hw->aq.asq.tail = I40E_PF_ATQT; hw->aq.asq.head = I40E_PF_ATQH; hw->aq.asq.len = I40E_PF_ATQLEN; hw->aq.asq.bal = I40E_PF_ATQBAL; hw->aq.asq.bah = I40E_PF_ATQBAH; hw->aq.arq.tail = I40E_PF_ARQT; hw->aq.arq.head = I40E_PF_ARQH; hw->aq.arq.len = I40E_PF_ARQLEN; hw->aq.arq.bal = I40E_PF_ARQBAL; hw->aq.arq.bah = I40E_PF_ARQBAH; } } /** * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings * @hw: pointer to the hardware structure **/ static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) { int ret_code; ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, i40e_mem_atq_ring, (hw->aq.num_asq_entries * sizeof(struct i40e_aq_desc)), I40E_ADMINQ_DESC_ALIGNMENT); if (ret_code) return ret_code; ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, (hw->aq.num_asq_entries * sizeof(struct i40e_asq_cmd_details))); if (ret_code) { i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); return ret_code; } return ret_code; } /** * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings * @hw: pointer to the hardware structure **/ static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) { int ret_code; ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, i40e_mem_arq_ring, (hw->aq.num_arq_entries * sizeof(struct i40e_aq_desc)), I40E_ADMINQ_DESC_ALIGNMENT); return ret_code; } /** * i40e_free_adminq_asq - Free Admin Queue send rings * @hw: pointer to the hardware structure * * This assumes the posted send buffers have already been cleaned * and de-allocated **/ static void i40e_free_adminq_asq(struct i40e_hw *hw) { i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); } /** * i40e_free_adminq_arq - Free Admin Queue receive rings * @hw: pointer to the hardware structure * * This assumes the posted receive buffers have already been cleaned * and de-allocated **/ static void i40e_free_adminq_arq(struct i40e_hw *hw) { i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); } /** * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue * @hw: pointer to the hardware structure **/ static int i40e_alloc_arq_bufs(struct i40e_hw *hw) { struct i40e_aq_desc *desc; struct i40e_dma_mem *bi; int ret_code; int i; /* We'll be allocating the buffer info memory first, then we can * allocate the mapped buffers for the event processing */ /* buffer_info structures do not need alignment */ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); if (ret_code) goto alloc_arq_bufs; hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; /* allocate the mapped buffers */ for (i = 0; i < hw->aq.num_arq_entries; i++) { bi = &hw->aq.arq.r.arq_bi[i]; ret_code = 
i40e_allocate_dma_mem(hw, bi, i40e_mem_arq_buf, hw->aq.arq_buf_size, I40E_ADMINQ_DESC_ALIGNMENT); if (ret_code) goto unwind_alloc_arq_bufs; /* now configure the descriptors for use */ desc = I40E_ADMINQ_DESC(hw->aq.arq, i); desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); desc->opcode = 0; /* This is in accordance with Admin queue design, there is no * register for buffer size configuration */ desc->datalen = cpu_to_le16((u16)bi->size); desc->retval = 0; desc->cookie_high = 0; desc->cookie_low = 0; desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa)); desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); desc->params.external.param0 = 0; desc->params.external.param1 = 0; } alloc_arq_bufs: return ret_code; unwind_alloc_arq_bufs: /* don't try to free the one that failed... */ i--; for (; i >= 0; i--) i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); return ret_code; } /** * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue * @hw: pointer to the hardware structure **/ static int i40e_alloc_asq_bufs(struct i40e_hw *hw) { struct i40e_dma_mem *bi; int ret_code; int i; /* No mapped memory needed yet, just the buffer info structures */ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); if (ret_code) goto alloc_asq_bufs; hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; /* allocate the mapped buffers */ for (i = 0; i < hw->aq.num_asq_entries; i++) { bi = &hw->aq.asq.r.asq_bi[i]; ret_code = i40e_allocate_dma_mem(hw, bi, i40e_mem_asq_buf, hw->aq.asq_buf_size, I40E_ADMINQ_DESC_ALIGNMENT); if (ret_code) goto unwind_alloc_asq_bufs; } alloc_asq_bufs: return ret_code; unwind_alloc_asq_bufs: /* don't try to free the one that failed... 
*/ i--; for (; i >= 0; i--) i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); return ret_code; } /** * i40e_free_arq_bufs - Free receive queue buffer info elements * @hw: pointer to the hardware structure **/ static void i40e_free_arq_bufs(struct i40e_hw *hw) { int i; /* free descriptors */ for (i = 0; i < hw->aq.num_arq_entries; i++) i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); /* free the descriptor memory */ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); /* free the dma header */ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); } /** * i40e_free_asq_bufs - Free send queue buffer info elements * @hw: pointer to the hardware structure **/ static void i40e_free_asq_bufs(struct i40e_hw *hw) { int i; /* only unmap if the address is non-NULL */ for (i = 0; i < hw->aq.num_asq_entries; i++) if (hw->aq.asq.r.asq_bi[i].pa) i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); /* free the buffer info list */ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); /* free the descriptor memory */ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); /* free the dma header */ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); } /** * i40e_config_asq_regs - configure ASQ registers * @hw: pointer to the hardware structure * * Configure base address and length registers for the transmit queue **/ static int i40e_config_asq_regs(struct i40e_hw *hw) { int ret_code = 0; u32 reg = 0; /* Clear Head and Tail */ wr32(hw, hw->aq.asq.head, 0); wr32(hw, hw->aq.asq.tail, 0); /* set starting point */ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | I40E_PF_ATQLEN_ATQENABLE_MASK)); wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa)); wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa)); /* Check one register to verify that config was applied */ reg = rd32(hw, hw->aq.asq.bal); if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa)) ret_code = -EIO; return ret_code; } /** * i40e_config_arq_regs - ARQ register configuration * @hw: pointer to the hardware structure * * Configure base address and length registers for the receive (event queue) **/ static int i40e_config_arq_regs(struct i40e_hw *hw) { int ret_code = 0; u32 reg = 0; /* Clear Head and Tail */ wr32(hw, hw->aq.arq.head, 0); wr32(hw, hw->aq.arq.tail, 0); /* set starting point */ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | I40E_PF_ARQLEN_ARQENABLE_MASK)); wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa)); wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa)); /* Update tail in the HW to post pre-allocated buffers */ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); /* Check one register to verify that config was applied */ reg = rd32(hw, hw->aq.arq.bal); if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa)) ret_code = -EIO; return ret_code; } /** * i40e_init_asq - main initialization routine for ASQ * @hw: pointer to the hardware structure * * This is the main initialization routine for the Admin Send Queue * Prior to calling this function, drivers *MUST* set the following fields * in the hw->aq structure: * - hw->aq.num_asq_entries * - hw->aq.arq_buf_size * * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe **/ static int i40e_init_asq(struct i40e_hw *hw) { int ret_code = 0; if (hw->aq.asq.count > 0) { /* queue already initialized */ ret_code = -EBUSY; goto init_adminq_exit; } /* verify input for valid configuration */ if ((hw->aq.num_asq_entries == 0) || (hw->aq.asq_buf_size == 0)) { ret_code = -EIO; goto init_adminq_exit; } 
hw->aq.asq.next_to_use = 0; hw->aq.asq.next_to_clean = 0; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_asq_ring(hw); if (ret_code) goto init_adminq_exit; /* allocate buffers in the rings */ ret_code = i40e_alloc_asq_bufs(hw); if (ret_code) goto init_adminq_free_rings; /* initialize base registers */ ret_code = i40e_config_asq_regs(hw); if (ret_code) goto init_adminq_free_rings; /* success! */ hw->aq.asq.count = hw->aq.num_asq_entries; goto init_adminq_exit; init_adminq_free_rings: i40e_free_adminq_asq(hw); init_adminq_exit: return ret_code; } /** * i40e_init_arq - initialize ARQ * @hw: pointer to the hardware structure * * The main initialization routine for the Admin Receive (Event) Queue. * Prior to calling this function, drivers *MUST* set the following fields * in the hw->aq structure: * - hw->aq.num_asq_entries * - hw->aq.arq_buf_size * * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe **/ static int i40e_init_arq(struct i40e_hw *hw) { int ret_code = 0; if (hw->aq.arq.count > 0) { /* queue already initialized */ ret_code = -EBUSY; goto init_adminq_exit; } /* verify input for valid configuration */ if ((hw->aq.num_arq_entries == 0) || (hw->aq.arq_buf_size == 0)) { ret_code = -EIO; goto init_adminq_exit; } hw->aq.arq.next_to_use = 0; hw->aq.arq.next_to_clean = 0; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_arq_ring(hw); if (ret_code) goto init_adminq_exit; /* allocate buffers in the rings */ ret_code = i40e_alloc_arq_bufs(hw); if (ret_code) goto init_adminq_free_rings; /* initialize base registers */ ret_code = i40e_config_arq_regs(hw); if (ret_code) goto init_adminq_free_rings; /* success! */ hw->aq.arq.count = hw->aq.num_arq_entries; goto init_adminq_exit; init_adminq_free_rings: i40e_free_adminq_arq(hw); init_adminq_exit: return ret_code; } /** * i40e_shutdown_asq - shutdown the ASQ * @hw: pointer to the hardware structure * * The main shutdown routine for the Admin Send Queue **/ static int i40e_shutdown_asq(struct i40e_hw *hw) { int ret_code = 0; mutex_lock(&hw->aq.asq_mutex); if (hw->aq.asq.count == 0) { ret_code = -EBUSY; goto shutdown_asq_out; } /* Stop firmware AdminQ processing */ wr32(hw, hw->aq.asq.head, 0); wr32(hw, hw->aq.asq.tail, 0); wr32(hw, hw->aq.asq.len, 0); wr32(hw, hw->aq.asq.bal, 0); wr32(hw, hw->aq.asq.bah, 0); hw->aq.asq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers */ i40e_free_asq_bufs(hw); shutdown_asq_out: mutex_unlock(&hw->aq.asq_mutex); return ret_code; } /** * i40e_shutdown_arq - shutdown ARQ * @hw: pointer to the hardware structure * * The main shutdown routine for the Admin Receive Queue **/ static int i40e_shutdown_arq(struct i40e_hw *hw) { int ret_code = 0; mutex_lock(&hw->aq.arq_mutex); if (hw->aq.arq.count == 0) { ret_code = -EBUSY; goto shutdown_arq_out; } /* Stop firmware AdminQ processing */ wr32(hw, hw->aq.arq.head, 0); wr32(hw, hw->aq.arq.tail, 0); wr32(hw, hw->aq.arq.len, 0); wr32(hw, hw->aq.arq.bal, 0); wr32(hw, hw->aq.arq.bah, 0); hw->aq.arq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers */ i40e_free_arq_bufs(hw); shutdown_arq_out: mutex_unlock(&hw->aq.arq_mutex); return ret_code; } /** * i40e_set_hw_flags - set HW flags * @hw: pointer to the hardware structure **/ static void i40e_set_hw_flags(struct i40e_hw *hw) { struct i40e_adminq_info *aq = &hw->aq; hw->flags = 0; switch (hw->mac.type) { case I40E_MAC_XL710: if (aq->api_maj_ver > 1 || (aq->api_maj_ver == 1 && aq->api_min_ver >= 
I40E_MINOR_VER_GET_LINK_INFO_XL710)) { hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; /* The ability to RX (not drop) 802.1ad frames */ hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE; } break; case I40E_MAC_X722: hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE | I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; if (aq->api_maj_ver > 1 || (aq->api_maj_ver == 1 && aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722)) hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; if (aq->api_maj_ver > 1 || (aq->api_maj_ver == 1 && aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722)) hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; if (aq->api_maj_ver > 1 || (aq->api_maj_ver == 1 && aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722)) hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE; fallthrough; default: break; } /* Newer versions of firmware require lock when reading the NVM */ if (aq->api_maj_ver > 1 || (aq->api_maj_ver == 1 && aq->api_min_ver >= 5)) hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; if (aq->api_maj_ver > 1 || (aq->api_maj_ver == 1 && aq->api_min_ver >= 8)) { hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT; hw->flags |= I40E_HW_FLAG_DROP_MODE; } if (aq->api_maj_ver > 1 || (aq->api_maj_ver == 1 && aq->api_min_ver >= 9)) hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED; } /** * i40e_init_adminq - main initialization routine for Admin Queue * @hw: pointer to the hardware structure * * Prior to calling this function, drivers *MUST* set the following fields * in the hw->aq structure: * - hw->aq.num_asq_entries * - hw->aq.num_arq_entries * - hw->aq.arq_buf_size * - hw->aq.asq_buf_size **/ int i40e_init_adminq(struct i40e_hw *hw) { u16 cfg_ptr, oem_hi, oem_lo; u16 eetrack_lo, eetrack_hi; int retry = 0; int ret_code; /* verify input for valid configuration */ if ((hw->aq.num_arq_entries == 0) || (hw->aq.num_asq_entries == 0) || (hw->aq.arq_buf_size == 0) || (hw->aq.asq_buf_size == 0)) { ret_code = -EIO; goto init_adminq_exit; } /* Set up register offsets */ i40e_adminq_init_regs(hw); /* setup ASQ command write back timeout */ hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT; /* allocate the ASQ */ ret_code = i40e_init_asq(hw); if (ret_code) goto init_adminq_destroy_locks; /* allocate the ARQ */ ret_code = i40e_init_arq(hw); if (ret_code) goto init_adminq_free_asq; /* There are some cases where the firmware may not be quite ready * for AdminQ operations, so we retry the AdminQ setup a few times * if we see timeouts in this first AQ call. */ do { ret_code = i40e_aq_get_firmware_version(hw, &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver, &hw->aq.fw_build, &hw->aq.api_maj_ver, &hw->aq.api_min_ver, NULL); if (ret_code != -EIO) break; retry++; msleep(100); i40e_resume_aq(hw); } while (retry < 10); if (ret_code != 0) goto init_adminq_free_arq; /* Some features were introduced in different FW API version * for different MAC type. 
*/ i40e_set_hw_flags(hw); /* get the NVM version info */ i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION, &hw->nvm.version); i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr); i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF), &oem_hi); i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)), &oem_lo); hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo; if (hw->mac.type == I40E_MAC_XL710 && hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; } if (hw->mac.type == I40E_MAC_X722 && hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) { hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; } /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */ if (hw->aq.api_maj_ver > 1 || (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver >= 7)) hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE; if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { ret_code = -EIO; goto init_adminq_free_arq; } /* pre-emptive resource lock release */ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); hw->nvm_release_on_done = false; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; ret_code = 0; /* success! */ goto init_adminq_exit; init_adminq_free_arq: i40e_shutdown_arq(hw); init_adminq_free_asq: i40e_shutdown_asq(hw); init_adminq_destroy_locks: init_adminq_exit: return ret_code; } /** * i40e_shutdown_adminq - shutdown routine for the Admin Queue * @hw: pointer to the hardware structure **/ void i40e_shutdown_adminq(struct i40e_hw *hw) { if (i40e_check_asq_alive(hw)) i40e_aq_queue_shutdown(hw, true); i40e_shutdown_asq(hw); i40e_shutdown_arq(hw); if (hw->nvm_buff.va) i40e_free_virt_mem(hw, &hw->nvm_buff); } /** * i40e_clean_asq - cleans Admin send queue * @hw: pointer to the hardware structure * * returns the number of free desc **/ static u16 i40e_clean_asq(struct i40e_hw *hw) { struct i40e_adminq_ring *asq = &(hw->aq.asq); struct i40e_asq_cmd_details *details; u16 ntc = asq->next_to_clean; struct i40e_aq_desc desc_cb; struct i40e_aq_desc *desc; desc = I40E_ADMINQ_DESC(*asq, ntc); details = I40E_ADMINQ_DETAILS(*asq, ntc); while (rd32(hw, hw->aq.asq.head) != ntc) { i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); if (details->callback) { I40E_ADMINQ_CALLBACK cb_func = (I40E_ADMINQ_CALLBACK)details->callback; desc_cb = *desc; cb_func(hw, &desc_cb); } memset(desc, 0, sizeof(*desc)); memset(details, 0, sizeof(*details)); ntc++; if (ntc == asq->count) ntc = 0; desc = I40E_ADMINQ_DESC(*asq, ntc); details = I40E_ADMINQ_DETAILS(*asq, ntc); } asq->next_to_clean = ntc; return I40E_DESC_UNUSED(asq); } /** * i40e_asq_done - check if FW has processed the Admin Send Queue * @hw: pointer to the hw struct * * Returns true if the firmware has processed all descriptors on the * admin send queue. Returns false if there are still requests pending. 
**/ static bool i40e_asq_done(struct i40e_hw *hw) { /* AQ designers suggest use of head for better * timing reliability than DD bit */ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; } /** * i40e_asq_send_command_atomic_exec - send command to Admin Queue * @hw: pointer to the hw struct * @desc: prefilled descriptor describing the command (non DMA mem) * @buff: buffer to use for indirect commands * @buff_size: size of buffer for indirect commands * @cmd_details: pointer to command details structure * @is_atomic_context: is the function called in an atomic context? * * This is the main send command driver routine for the Admin Queue send * queue. It runs the queue, cleans the queue, etc **/ static int i40e_asq_send_command_atomic_exec(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details, bool is_atomic_context) { struct i40e_dma_mem *dma_buff = NULL; struct i40e_asq_cmd_details *details; struct i40e_aq_desc *desc_on_ring; bool cmd_completed = false; u16 retval = 0; int status = 0; u32 val = 0; if (hw->aq.asq.count == 0) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: Admin queue not initialized.\n"); status = -EIO; goto asq_send_command_error; } hw->aq.asq_last_status = I40E_AQ_RC_OK; val = rd32(hw, hw->aq.asq.head); if (val >= hw->aq.num_asq_entries) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: head overrun at %d\n", val); status = -ENOSPC; goto asq_send_command_error; } details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); if (cmd_details) { *details = *cmd_details; /* If the cmd_details are defined copy the cookie. The * cpu_to_le32 is not needed here because the data is ignored * by the FW, only used by the driver */ if (details->cookie) { desc->cookie_high = cpu_to_le32(upper_32_bits(details->cookie)); desc->cookie_low = cpu_to_le32(lower_32_bits(details->cookie)); } } else { memset(details, 0, sizeof(struct i40e_asq_cmd_details)); } /* clear requested flags and then set additional flags if defined */ desc->flags &= ~cpu_to_le16(details->flags_dis); desc->flags |= cpu_to_le16(details->flags_ena); if (buff_size > hw->aq.asq_buf_size) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: Invalid buffer size: %d.\n", buff_size); status = -EINVAL; goto asq_send_command_error; } if (details->postpone && !details->async) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: Async flag not set along with postpone flag"); status = -EINVAL; goto asq_send_command_error; } /* call clean and check queue available function to reclaim the * descriptors that were processed by FW, the function returns the * number of desc available */ /* the clean function called here could be called in a separate thread * in case of asynchronous completions */ if (i40e_clean_asq(hw) == 0) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: Error queue is full.\n"); status = -ENOSPC; goto asq_send_command_error; } /* initialize the temp desc pointer with the right desc */ desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); /* if the desc is available copy the temp desc to the right place */ *desc_on_ring = *desc; /* if buff is not NULL assume indirect command */ if (buff != NULL) { dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); /* copy the user buff into the respective DMA buff */ memcpy(dma_buff->va, buff, buff_size); desc_on_ring->datalen = cpu_to_le16(buff_size); /* Update the address values in the desc with the pa value * for respective buffer */ desc_on_ring->params.external.addr_high = 
cpu_to_le32(upper_32_bits(dma_buff->pa)); desc_on_ring->params.external.addr_low = cpu_to_le32(lower_32_bits(dma_buff->pa)); } /* bump the tail */ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n"); i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff, buff_size); (hw->aq.asq.next_to_use)++; if (hw->aq.asq.next_to_use == hw->aq.asq.count) hw->aq.asq.next_to_use = 0; if (!details->postpone) wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); /* if cmd_details are not defined or async flag is not set, * we need to wait for desc write back */ if (!details->async && !details->postpone) { u32 total_delay = 0; do { /* AQ designers suggest use of head for better * timing reliability than DD bit */ if (i40e_asq_done(hw)) break; if (is_atomic_context) udelay(50); else usleep_range(40, 60); total_delay += 50; } while (total_delay < hw->aq.asq_cmd_timeout); } /* if ready, copy the desc back to temp */ if (i40e_asq_done(hw)) { *desc = *desc_on_ring; if (buff != NULL) memcpy(buff, dma_buff->va, buff_size); retval = le16_to_cpu(desc->retval); if (retval != 0) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: Command completed with error 0x%X.\n", retval); /* strip off FW internal code */ retval &= 0xff; } cmd_completed = true; if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) status = 0; else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY) status = -EBUSY; else status = -EIO; hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; } i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer writeback:\n"); i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); /* save writeback aq if requested */ if (details->wb_desc) *details->wb_desc = *desc_on_ring; /* update the error if time out occurred */ if ((!cmd_completed) && (!details->async && !details->postpone)) { if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: AQ Critical error.\n"); status = -EIO; } else { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: Writeback timeout.\n"); status = -EIO; } } asq_send_command_error: return status; } /** * i40e_asq_send_command_atomic - send command to Admin Queue * @hw: pointer to the hw struct * @desc: prefilled descriptor describing the command (non DMA mem) * @buff: buffer to use for indirect commands * @buff_size: size of buffer for indirect commands * @cmd_details: pointer to command details structure * @is_atomic_context: is the function called in an atomic context? * * Acquires the lock and calls the main send command execution * routine. 
**/ int i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details, bool is_atomic_context) { int status; mutex_lock(&hw->aq.asq_mutex); status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size, cmd_details, is_atomic_context); mutex_unlock(&hw->aq.asq_mutex); return status; } int i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details) { return i40e_asq_send_command_atomic(hw, desc, buff, buff_size, cmd_details, false); } /** * i40e_asq_send_command_atomic_v2 - send command to Admin Queue * @hw: pointer to the hw struct * @desc: prefilled descriptor describing the command (non DMA mem) * @buff: buffer to use for indirect commands * @buff_size: size of buffer for indirect commands * @cmd_details: pointer to command details structure * @is_atomic_context: is the function called in an atomic context? * @aq_status: pointer to Admin Queue status return value * * Acquires the lock and calls the main send command execution * routine. Returns the last Admin Queue status in aq_status * to avoid race conditions in access to hw->aq.asq_last_status. **/ int i40e_asq_send_command_atomic_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details, bool is_atomic_context, enum i40e_admin_queue_err *aq_status) { int status; mutex_lock(&hw->aq.asq_mutex); status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size, cmd_details, is_atomic_context); if (aq_status) *aq_status = hw->aq.asq_last_status; mutex_unlock(&hw->aq.asq_mutex); return status; } int i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details, enum i40e_admin_queue_err *aq_status) { return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size, cmd_details, true, aq_status); } /** * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function * @desc: pointer to the temp descriptor (non DMA mem) * @opcode: the opcode can be used to decide which flags to turn off or on * * Fill the desc with default values **/ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode) { /* zero out the desc */ memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); desc->opcode = cpu_to_le16(opcode); desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI); } /** * i40e_clean_arq_element * @hw: pointer to the hw struct * @e: event info from the receive descriptor, includes any buffers * @pending: number of events that could be left to process * * This function cleans one Admin Receive Queue element and returns * the contents through e. 
It can also return how many events are * left to process through 'pending' **/ int i40e_clean_arq_element(struct i40e_hw *hw, struct i40e_arq_event_info *e, u16 *pending) { u16 ntc = hw->aq.arq.next_to_clean; struct i40e_aq_desc *desc; struct i40e_dma_mem *bi; int ret_code = 0; u16 desc_idx; u16 datalen; u16 flags; u16 ntu; /* pre-clean the event info */ memset(&e->desc, 0, sizeof(e->desc)); /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); if (hw->aq.arq.count == 0) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: Admin queue not initialized.\n"); ret_code = -EIO; goto clean_arq_element_err; } /* set next_to_use to head */ ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK; if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ ret_code = -EALREADY; goto clean_arq_element_out; } /* now clean the next descriptor */ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); desc_idx = ntc; hw->aq.arq_last_status = (enum i40e_admin_queue_err)le16_to_cpu(desc->retval); flags = le16_to_cpu(desc->flags); if (flags & I40E_AQ_FLAG_ERR) { ret_code = -EIO; i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: Event received with error 0x%X.\n", hw->aq.arq_last_status); } e->desc = *desc; datalen = le16_to_cpu(desc->datalen); e->msg_len = min(datalen, e->buf_len); if (e->msg_buf != NULL && (e->msg_len != 0)) memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, e->msg_len); i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n"); i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, hw->aq.arq_buf_size); /* Restore the original datalen and buffer address in the desc, * FW updates datalen to indicate the event message * size */ bi = &hw->aq.arq.r.arq_bi[ntc]; memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); desc->datalen = cpu_to_le16((u16)bi->size); desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa)); desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); /* set tail = the last cleaned desc index. */ wr32(hw, hw->aq.arq.tail, ntc); /* ntc is updated to tail + 1 */ ntc++; if (ntc == hw->aq.num_arq_entries) ntc = 0; hw->aq.arq.next_to_clean = ntc; hw->aq.arq.next_to_use = ntu; i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc); clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending) *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); clean_arq_element_err: mutex_unlock(&hw->aq.arq_mutex); return ret_code; } static void i40e_resume_aq(struct i40e_hw *hw) { /* Registers are reset after PF reset */ hw->aq.asq.next_to_use = 0; hw->aq.asq.next_to_clean = 0; i40e_config_asq_regs(hw); hw->aq.arq.next_to_use = 0; hw->aq.arq.next_to_clean = 0; i40e_config_arq_regs(hw); }
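A minimal usage sketch of the send path defined in the i40e_adminq.c record above, assuming only the two helpers shown there (i40e_fill_default_direct_cmd_desc and i40e_asq_send_command); the wrapper function and its opcode parameter are hypothetical and illustrative, not part of the kernel file.

/* Hypothetical caller: issue a direct (no indirect buffer) AdminQ command. */
static int example_send_direct_aq_cmd(struct i40e_hw *hw, u16 opcode)
{
	struct i40e_aq_desc desc;

	/* zero the descriptor and set the opcode plus the SI flag */
	i40e_fill_default_direct_cmd_desc(&desc, opcode);

	/* NULL buffer and NULL details: the send routine polls the queue
	 * head until firmware writes the descriptor back or the ASQ
	 * command timeout expires, then maps the AQ return code to errno
	 */
	return i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
}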
linux-master
drivers/net/ethernet/intel/i40e/i40e_adminq.c
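A companion sketch for the receive side of the same i40e_adminq.c record, assuming only i40e_clean_arq_element() and the i40e_arq_event_info fields it fills in (desc, msg_len, buf_len, msg_buf); the polling wrapper and its buffer parameters are hypothetical.

/* Hypothetical caller: drain all pending AdminQ receive events. */
static void example_drain_arq(struct i40e_hw *hw, u8 *buf, u16 buf_len)
{
	struct i40e_arq_event_info event;
	u16 pending;

	event.buf_len = buf_len;
	event.msg_buf = buf;

	do {
		/* returns -EALREADY once next_to_clean catches up with
		 * the hardware head, i.e. nothing is left to process
		 */
		if (i40e_clean_arq_element(hw, &event, &pending))
			break;

		/* event.desc and event.msg_len now describe one event;
		 * a real caller would dispatch on the opcode here
		 */
	} while (pending);
}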
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e.h" #include <linux/ptp_classify.h> #include <linux/posix-clock.h> /* The XL710 timesync is very much like Intel's 82599 design when it comes to * the fundamental clock design. However, the clock operations are much simpler * in the XL710 because the device supports a full 64 bits of nanoseconds. * Because the field is so wide, we can forgo the cycle counter and just * operate with the nanosecond field directly without fear of overflow. * * Much like the 82599, the update period is dependent upon the link speed: * At 40Gb, 25Gb, or no link, the period is 1.6ns. * At 10Gb or 5Gb link, the period is multiplied by 2. (3.2ns) * At 1Gb link, the period is multiplied by 20. (32ns) * 1588 functionality is not supported at 100Mbps. */ #define I40E_PTP_40GB_INCVAL 0x0199999999ULL #define I40E_PTP_10GB_INCVAL_MULT 2 #define I40E_PTP_5GB_INCVAL_MULT 2 #define I40E_PTP_1GB_INCVAL_MULT 20 #define I40E_ISGN 0x80000000 #define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) #define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \ I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) #define I40E_SUBDEV_ID_25G_PTP_PIN 0xB enum i40e_ptp_pin { SDP3_2 = 0, SDP3_3, GPIO_4 }; enum i40e_can_set_pins_t { CANT_DO_PINS = -1, CAN_SET_PINS, CAN_DO_PINS }; static struct ptp_pin_desc sdp_desc[] = { /* name idx func chan */ {"SDP3_2", SDP3_2, PTP_PF_NONE, 0}, {"SDP3_3", SDP3_3, PTP_PF_NONE, 1}, {"GPIO_4", GPIO_4, PTP_PF_NONE, 1}, }; enum i40e_ptp_gpio_pin_state { end = -2, invalid, off, in_A, in_B, out_A, out_B, }; static const char * const i40e_ptp_gpio_pin_state2str[] = { "off", "in_A", "in_B", "out_A", "out_B" }; enum i40e_ptp_led_pin_state { led_end = -2, low = 0, high, }; struct i40e_ptp_pins_settings { enum i40e_ptp_gpio_pin_state sdp3_2; enum i40e_ptp_gpio_pin_state sdp3_3; enum i40e_ptp_gpio_pin_state gpio_4; enum i40e_ptp_led_pin_state led2_0; enum i40e_ptp_led_pin_state led2_1; enum i40e_ptp_led_pin_state led3_0; enum i40e_ptp_led_pin_state led3_1; }; static const struct i40e_ptp_pins_settings i40e_ptp_pin_led_allowed_states[] = { {off, off, off, high, high, high, high}, {off, in_A, off, high, high, high, low}, {off, out_A, off, high, low, high, high}, {off, in_B, off, high, high, high, low}, {off, out_B, off, high, low, high, high}, {in_A, off, off, high, high, high, low}, {in_A, in_B, off, high, high, high, low}, {in_A, out_B, off, high, low, high, high}, {out_A, off, off, high, low, high, high}, {out_A, in_B, off, high, low, high, high}, {in_B, off, off, high, high, high, low}, {in_B, in_A, off, high, high, high, low}, {in_B, out_A, off, high, low, high, high}, {out_B, off, off, high, low, high, high}, {out_B, in_A, off, high, low, high, high}, {off, off, in_A, high, high, low, high}, {off, out_A, in_A, high, low, low, high}, {off, in_B, in_A, high, high, low, low}, {off, out_B, in_A, high, low, low, high}, {out_A, off, in_A, high, low, low, high}, {out_A, in_B, in_A, high, low, low, high}, {in_B, off, in_A, high, high, low, low}, {in_B, out_A, in_A, high, low, low, high}, {out_B, off, in_A, high, low, low, high}, {off, off, out_A, low, high, high, high}, {off, in_A, out_A, low, high, high, low}, {off, in_B, out_A, low, high, high, low}, {off, out_B, out_A, low, low, high, high}, {in_A, off, out_A, low, high, high, low}, {in_A, in_B, out_A, low, high, high, low}, {in_A, out_B, out_A, low, low, high, high}, {in_B, off, out_A, low, high, high, low}, {in_B, in_A, out_A, low, high, high, low}, {out_B, off, out_A, low, low, 
high, high}, {out_B, in_A, out_A, low, low, high, high}, {off, off, in_B, high, high, low, high}, {off, in_A, in_B, high, high, low, low}, {off, out_A, in_B, high, low, low, high}, {off, out_B, in_B, high, low, low, high}, {in_A, off, in_B, high, high, low, low}, {in_A, out_B, in_B, high, low, low, high}, {out_A, off, in_B, high, low, low, high}, {out_B, off, in_B, high, low, low, high}, {out_B, in_A, in_B, high, low, low, high}, {off, off, out_B, low, high, high, high}, {off, in_A, out_B, low, high, high, low}, {off, out_A, out_B, low, low, high, high}, {off, in_B, out_B, low, high, high, low}, {in_A, off, out_B, low, high, high, low}, {in_A, in_B, out_B, low, high, high, low}, {out_A, off, out_B, low, low, high, high}, {out_A, in_B, out_B, low, low, high, high}, {in_B, off, out_B, low, high, high, low}, {in_B, in_A, out_B, low, high, high, low}, {in_B, out_A, out_B, low, low, high, high}, {end, end, end, led_end, led_end, led_end, led_end} }; static int i40e_ptp_set_pins(struct i40e_pf *pf, struct i40e_ptp_pins_settings *pins); /** * i40e_ptp_extts0_work - workqueue task function * @work: workqueue task structure * * Service for PTP external clock event **/ static void i40e_ptp_extts0_work(struct work_struct *work) { struct i40e_pf *pf = container_of(work, struct i40e_pf, ptp_extts0_work); struct i40e_hw *hw = &pf->hw; struct ptp_clock_event event; u32 hi, lo; /* Event time is captured by one of the two matched registers * PRTTSYN_EVNT_L: 32 LSB of sampled time event * PRTTSYN_EVNT_H: 32 MSB of sampled time event * Event is defined in PRTTSYN_EVNT_0 register */ lo = rd32(hw, I40E_PRTTSYN_EVNT_L(0)); hi = rd32(hw, I40E_PRTTSYN_EVNT_H(0)); event.timestamp = (((u64)hi) << 32) | lo; event.type = PTP_CLOCK_EXTTS; event.index = hw->pf_id; /* fire event */ ptp_clock_event(pf->ptp_clock, &event); } /** * i40e_is_ptp_pin_dev - check if device supports PTP pins * @hw: pointer to the hardware structure * * Return true if device supports PTP pins, false otherwise. **/ static bool i40e_is_ptp_pin_dev(struct i40e_hw *hw) { return hw->device_id == I40E_DEV_ID_25G_SFP28 && hw->subsystem_device_id == I40E_SUBDEV_ID_25G_PTP_PIN; } /** * i40e_can_set_pins - check possibility of manipulating the pins * @pf: board private structure * * Check if all conditions are satisfied to manipulate PTP pins. * Return CAN_SET_PINS if pins can be set on a specific PF or * return CAN_DO_PINS if pins can be manipulated within a NIC or * return CANT_DO_PINS otherwise. **/ static enum i40e_can_set_pins_t i40e_can_set_pins(struct i40e_pf *pf) { if (!i40e_is_ptp_pin_dev(&pf->hw)) { dev_warn(&pf->pdev->dev, "PTP external clock not supported.\n"); return CANT_DO_PINS; } if (!pf->ptp_pins) { dev_warn(&pf->pdev->dev, "PTP PIN manipulation not allowed.\n"); return CANT_DO_PINS; } if (pf->hw.pf_id) { dev_warn(&pf->pdev->dev, "PTP PINs should be accessed via PF0.\n"); return CAN_DO_PINS; } return CAN_SET_PINS; } /** * i40_ptp_reset_timing_events - Reset PTP timing events * @pf: Board private structure * * This function resets timing events for pf. 
**/ static void i40_ptp_reset_timing_events(struct i40e_pf *pf) { u32 i; spin_lock_bh(&pf->ptp_rx_lock); for (i = 0; i <= I40E_PRTTSYN_RXTIME_L_MAX_INDEX; i++) { /* reading and automatically clearing timing events registers */ rd32(&pf->hw, I40E_PRTTSYN_RXTIME_L(i)); rd32(&pf->hw, I40E_PRTTSYN_RXTIME_H(i)); pf->latch_events[i] = 0; } /* reading and automatically clearing timing events registers */ rd32(&pf->hw, I40E_PRTTSYN_TXTIME_L); rd32(&pf->hw, I40E_PRTTSYN_TXTIME_H); pf->tx_hwtstamp_timeouts = 0; pf->tx_hwtstamp_skipped = 0; pf->rx_hwtstamp_cleared = 0; pf->latch_event_flags = 0; spin_unlock_bh(&pf->ptp_rx_lock); } /** * i40e_ptp_verify - check pins * @ptp: ptp clock * @pin: pin index * @func: assigned function * @chan: channel * * Check pins consistency. * Return 0 on success or error on failure. **/ static int i40e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan) { switch (func) { case PTP_PF_NONE: case PTP_PF_EXTTS: case PTP_PF_PEROUT: break; case PTP_PF_PHYSYNC: return -EOPNOTSUPP; } return 0; } /** * i40e_ptp_read - Read the PHC time from the device * @pf: Board private structure * @ts: timespec structure to hold the current time value * @sts: structure to hold the system time before and after reading the PHC * * This function reads the PRTTSYN_TIME registers and stores them in a * timespec. However, since the registers are 64 bits of nanoseconds, we must * convert the result to a timespec before we can return. **/ static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts, struct ptp_system_timestamp *sts) { struct i40e_hw *hw = &pf->hw; u32 hi, lo; u64 ns; /* The timer latches on the lowest register read. */ ptp_read_system_prets(sts); lo = rd32(hw, I40E_PRTTSYN_TIME_L); ptp_read_system_postts(sts); hi = rd32(hw, I40E_PRTTSYN_TIME_H); ns = (((u64)hi) << 32) | lo; *ts = ns_to_timespec64(ns); } /** * i40e_ptp_write - Write the PHC time to the device * @pf: Board private structure * @ts: timespec structure that holds the new time value * * This function writes the PRTTSYN_TIME registers with the user value. Since * we receive a timespec from the stack, we must convert that timespec into * nanoseconds before programming the registers. **/ static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec64 *ts) { struct i40e_hw *hw = &pf->hw; u64 ns = timespec64_to_ns(ts); /* The timer will not update until the high register is written, so * write the low register first. */ wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF); wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32); } /** * i40e_ptp_convert_to_hwtstamp - Convert device clock to system time * @hwtstamps: Timestamp structure to update * @timestamp: Timestamp from the hardware * * We need to convert the NIC clock value into a hwtstamp which can be used by * the upper level timestamping functions. Since the timestamp is simply a 64- * bit nanosecond value, we can call ns_to_ktime directly to handle this. **/ static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps, u64 timestamp) { memset(hwtstamps, 0, sizeof(*hwtstamps)); hwtstamps->hwtstamp = ns_to_ktime(timestamp); } /** * i40e_ptp_adjfine - Adjust the PHC frequency * @ptp: The PTP clock structure * @scaled_ppm: Scaled parts per million adjustment from base * * Adjust the frequency of the PHC by the indicated delta from the base * frequency. * * Scaled parts per million is ppm with a 16 bit binary fractional field. 
**/ static int i40e_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); struct i40e_hw *hw = &pf->hw; u64 adj, base_adj; smp_mb(); /* Force any pending update before accessing. */ base_adj = I40E_PTP_40GB_INCVAL * READ_ONCE(pf->ptp_adj_mult); adj = adjust_by_scaled_ppm(base_adj, scaled_ppm); wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF); wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32); return 0; } /** * i40e_ptp_set_1pps_signal_hw - configure 1PPS PTP signal for pins * @pf: the PF private data structure * * Configure 1PPS signal used for PTP pins **/ static void i40e_ptp_set_1pps_signal_hw(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; struct timespec64 now; u64 ns; wr32(hw, I40E_PRTTSYN_AUX_0(1), 0); wr32(hw, I40E_PRTTSYN_AUX_1(1), I40E_PRTTSYN_AUX_1_INSTNT); wr32(hw, I40E_PRTTSYN_AUX_0(1), I40E_PRTTSYN_AUX_0_OUT_ENABLE); i40e_ptp_read(pf, &now, NULL); now.tv_sec += I40E_PTP_2_SEC_DELAY; now.tv_nsec = 0; ns = timespec64_to_ns(&now); /* I40E_PRTTSYN_TGT_L(1) */ wr32(hw, I40E_PRTTSYN_TGT_L(1), ns & 0xFFFFFFFF); /* I40E_PRTTSYN_TGT_H(1) */ wr32(hw, I40E_PRTTSYN_TGT_H(1), ns >> 32); wr32(hw, I40E_PRTTSYN_CLKO(1), I40E_PTP_HALF_SECOND); wr32(hw, I40E_PRTTSYN_AUX_1(1), I40E_PRTTSYN_AUX_1_INSTNT); wr32(hw, I40E_PRTTSYN_AUX_0(1), I40E_PRTTSYN_AUX_0_OUT_ENABLE_CLK_MOD); } /** * i40e_ptp_adjtime - Adjust the PHC time * @ptp: The PTP clock structure * @delta: Offset in nanoseconds to adjust the PHC time by * * Adjust the current clock time by a delta specified in nanoseconds. **/ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); struct i40e_hw *hw = &pf->hw; mutex_lock(&pf->tmreg_lock); if (delta > -999999900LL && delta < 999999900LL) { int neg_adj = 0; u32 timadj; u64 tohw; if (delta < 0) { neg_adj = 1; tohw = -delta; } else { tohw = delta; } timadj = tohw & 0x3FFFFFFF; if (neg_adj) timadj |= I40E_ISGN; wr32(hw, I40E_PRTTSYN_ADJ, timadj); } else { struct timespec64 then, now; then = ns_to_timespec64(delta); i40e_ptp_read(pf, &now, NULL); now = timespec64_add(now, then); i40e_ptp_write(pf, (const struct timespec64 *)&now); i40e_ptp_set_1pps_signal_hw(pf); } mutex_unlock(&pf->tmreg_lock); return 0; } /** * i40e_ptp_gettimex - Get the time of the PHC * @ptp: The PTP clock structure * @ts: timespec structure to hold the current time value * @sts: structure to hold the system time before and after reading the PHC * * Read the device clock and return the correct value on ns, after converting it * into a timespec struct. **/ static int i40e_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, struct ptp_system_timestamp *sts) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); mutex_lock(&pf->tmreg_lock); i40e_ptp_read(pf, ts, sts); mutex_unlock(&pf->tmreg_lock); return 0; } /** * i40e_ptp_settime - Set the time of the PHC * @ptp: The PTP clock structure * @ts: timespec64 structure that holds the new time value * * Set the device clock to the user input value. The conversion from timespec * to ns happens in the write function. **/ static int i40e_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); mutex_lock(&pf->tmreg_lock); i40e_ptp_write(pf, ts); mutex_unlock(&pf->tmreg_lock); return 0; } /** * i40e_pps_configure - configure PPS events * @ptp: ptp clock * @rq: clock request * @on: status * * Configure PPS events for external clock source. 
* Return 0 on success or error on failure. **/ static int i40e_pps_configure(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); if (!!on) i40e_ptp_set_1pps_signal_hw(pf); return 0; } /** * i40e_pin_state - determine PIN state * @index: PIN index * @func: function assigned to PIN * * Determine PIN state based on PIN index and function assigned. * Return PIN state. **/ static enum i40e_ptp_gpio_pin_state i40e_pin_state(int index, int func) { enum i40e_ptp_gpio_pin_state state = off; if (index == 0 && func == PTP_PF_EXTTS) state = in_A; if (index == 1 && func == PTP_PF_EXTTS) state = in_B; if (index == 0 && func == PTP_PF_PEROUT) state = out_A; if (index == 1 && func == PTP_PF_PEROUT) state = out_B; return state; } /** * i40e_ptp_enable_pin - enable PINs. * @pf: private board structure * @chan: channel * @func: PIN function * @on: state * * Enable PTP pins for external clock source. * Return 0 on success or error code on failure. **/ static int i40e_ptp_enable_pin(struct i40e_pf *pf, unsigned int chan, enum ptp_pin_function func, int on) { enum i40e_ptp_gpio_pin_state *pin = NULL; struct i40e_ptp_pins_settings pins; int pin_index; /* Use PF0 to set pins. Return success for user space tools */ if (pf->hw.pf_id) return 0; /* Preserve previous state of pins that we don't touch */ pins.sdp3_2 = pf->ptp_pins->sdp3_2; pins.sdp3_3 = pf->ptp_pins->sdp3_3; pins.gpio_4 = pf->ptp_pins->gpio_4; /* To turn on the pin - find the corresponding one based on * the given index. To to turn the function off - find * which pin had it assigned. Don't use ptp_find_pin here * because it tries to lock the pincfg_mux which is locked by * ptp_pin_store() that calls here. */ if (on) { pin_index = ptp_find_pin(pf->ptp_clock, func, chan); if (pin_index < 0) return -EBUSY; switch (pin_index) { case SDP3_2: pin = &pins.sdp3_2; break; case SDP3_3: pin = &pins.sdp3_3; break; case GPIO_4: pin = &pins.gpio_4; break; default: return -EINVAL; } *pin = i40e_pin_state(chan, func); } else { pins.sdp3_2 = off; pins.sdp3_3 = off; pins.gpio_4 = off; } return i40e_ptp_set_pins(pf, &pins) ? -EINVAL : 0; } /** * i40e_ptp_feature_enable - Enable external clock pins * @ptp: The PTP clock structure * @rq: The PTP clock request structure * @on: To turn feature on/off * * Setting on/off PTP PPS feature for pin. **/ static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); enum ptp_pin_function func; unsigned int chan; /* TODO: Implement flags handling for EXTTS and PEROUT */ switch (rq->type) { case PTP_CLK_REQ_EXTTS: func = PTP_PF_EXTTS; chan = rq->extts.index; break; case PTP_CLK_REQ_PEROUT: func = PTP_PF_PEROUT; chan = rq->perout.index; break; case PTP_CLK_REQ_PPS: return i40e_pps_configure(ptp, rq, on); default: return -EOPNOTSUPP; } return i40e_ptp_enable_pin(pf, chan, func, on); } /** * i40e_ptp_get_rx_events - Read I40E_PRTTSYN_STAT_1 and latch events * @pf: the PF data structure * * This function reads I40E_PRTTSYN_STAT_1 and updates the corresponding timers * for noticed latch events. This allows the driver to keep track of the first * time a latch event was noticed which will be used to help clear out Rx * timestamps for packets that got dropped or lost. * * This function will return the current value of I40E_PRTTSYN_STAT_1 and is * expected to be called only while under the ptp_rx_lock. 
**/ static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 prttsyn_stat, new_latch_events; int i; prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); new_latch_events = prttsyn_stat & ~pf->latch_event_flags; /* Update the jiffies time for any newly latched timestamp. This * ensures that we store the time that we first discovered a timestamp * was latched by the hardware. The service task will later determine * if we should free the latch and drop that timestamp should too much * time pass. This flow ensures that we only update jiffies for new * events latched since the last time we checked, and not all events * currently latched, so that the service task accounting remains * accurate. */ for (i = 0; i < 4; i++) { if (new_latch_events & BIT(i)) pf->latch_events[i] = jiffies; } /* Finally, we store the current status of the Rx timestamp latches */ pf->latch_event_flags = prttsyn_stat; return prttsyn_stat; } /** * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung * @pf: The PF private data structure * * This watchdog task is scheduled to detect error case where hardware has * dropped an Rx packet that was timestamped when the ring is full. The * particular error is rare but leaves the device in a state unable to timestamp * any future packets. **/ void i40e_ptp_rx_hang(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; unsigned int i, cleared = 0; /* Since we cannot turn off the Rx timestamp logic if the device is * configured for Tx timestamping, we check if Rx timestamping is * configured. We don't want to spuriously warn about Rx timestamp * hangs if we don't care about the timestamps. */ if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx) return; spin_lock_bh(&pf->ptp_rx_lock); /* Update current latch times for Rx events */ i40e_ptp_get_rx_events(pf); /* Check all the currently latched Rx events and see whether they have * been latched for over a second. It is assumed that any timestamp * should have been cleared within this time, or else it was captured * for a dropped frame that the driver never received. Thus, we will * clear any timestamp that has been latched for over 1 second. */ for (i = 0; i < 4; i++) { if ((pf->latch_event_flags & BIT(i)) && time_is_before_jiffies(pf->latch_events[i] + HZ)) { rd32(hw, I40E_PRTTSYN_RXTIME_H(i)); pf->latch_event_flags &= ~BIT(i); cleared++; } } spin_unlock_bh(&pf->ptp_rx_lock); /* Log a warning if more than 2 timestamps got dropped in the same * check. We don't want to warn about all drops because it can occur * in normal scenarios such as PTP frames on multicast addresses we * aren't listening to. However, administrator should know if this is * the reason packets aren't receiving timestamps. */ if (cleared > 2) dev_dbg(&pf->pdev->dev, "Dropped %d missed RXTIME timestamp events\n", cleared); /* Finally, update the rx_hwtstamp_cleared counter */ pf->rx_hwtstamp_cleared += cleared; } /** * i40e_ptp_tx_hang - Detect error case when Tx timestamp register is hung * @pf: The PF private data structure * * This watchdog task is run periodically to make sure that we clear the Tx * timestamp logic if we don't obtain a timestamp in a reasonable amount of * time. It is unexpected in the normal case but if it occurs it results in * permanently preventing timestamps of future packets. 
**/ void i40e_ptp_tx_hang(struct i40e_pf *pf) { struct sk_buff *skb; if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx) return; /* Nothing to do if we're not already waiting for a timestamp */ if (!test_bit(__I40E_PTP_TX_IN_PROGRESS, pf->state)) return; /* We already have a handler routine which is run when we are notified * of a Tx timestamp in the hardware. If we don't get an interrupt * within a second it is reasonable to assume that we never will. */ if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) { skb = pf->ptp_tx_skb; pf->ptp_tx_skb = NULL; clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); /* Free the skb after we clear the bitlock */ dev_kfree_skb_any(skb); pf->tx_hwtstamp_timeouts++; } } /** * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp * @pf: Board private structure * * Read the value of the Tx timestamp from the registers, convert it into a * value consumable by the stack, and store that result into the shhwtstamps * struct before returning it up the stack. **/ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf) { struct skb_shared_hwtstamps shhwtstamps; struct sk_buff *skb = pf->ptp_tx_skb; struct i40e_hw *hw = &pf->hw; u32 hi, lo; u64 ns; if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx) return; /* don't attempt to timestamp if we don't have an skb */ if (!pf->ptp_tx_skb) return; lo = rd32(hw, I40E_PRTTSYN_TXTIME_L); hi = rd32(hw, I40E_PRTTSYN_TXTIME_H); ns = (((u64)hi) << 32) | lo; i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns); /* Clear the bit lock as soon as possible after reading the register, * and prior to notifying the stack via skb_tstamp_tx(). Otherwise * applications might wake up and attempt to request another transmit * timestamp prior to the bit lock being cleared. */ pf->ptp_tx_skb = NULL; clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); /* Notify the stack and free the skb after we've unlocked */ skb_tstamp_tx(skb, &shhwtstamps); dev_kfree_skb_any(skb); } /** * i40e_ptp_rx_hwtstamp - Utility function which checks for an Rx timestamp * @pf: Board private structure * @skb: Particular skb to send timestamp with * @index: Index into the receive timestamp registers for the timestamp * * The XL710 receives a notification in the receive descriptor with an offset * into the set of RXTIME registers where the timestamp is for that skb. This * function goes and fetches the receive timestamp from that offset, if a valid * one exists. The RXTIME registers are in ns, so we must convert the result * first. **/ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index) { u32 prttsyn_stat, hi, lo; struct i40e_hw *hw; u64 ns; /* Since we cannot turn off the Rx timestamp logic if the device is * doing Tx timestamping, check if Rx timestamping is configured. */ if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx) return; hw = &pf->hw; spin_lock_bh(&pf->ptp_rx_lock); /* Get current Rx events and update latch times */ prttsyn_stat = i40e_ptp_get_rx_events(pf); /* TODO: Should we warn about missing Rx timestamp event? 
*/ if (!(prttsyn_stat & BIT(index))) { spin_unlock_bh(&pf->ptp_rx_lock); return; } /* Clear the latched event since we're about to read its register */ pf->latch_event_flags &= ~BIT(index); lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index)); hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index)); spin_unlock_bh(&pf->ptp_rx_lock); ns = (((u64)hi) << 32) | lo; i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns); } /** * i40e_ptp_set_increment - Utility function to update clock increment rate * @pf: Board private structure * * During a link change, the DMA frequency that drives the 1588 logic will * change. In order to keep the PRTTSYN_TIME registers in units of nanoseconds, * we must update the increment value per clock tick. **/ void i40e_ptp_set_increment(struct i40e_pf *pf) { struct i40e_link_status *hw_link_info; struct i40e_hw *hw = &pf->hw; u64 incval; u32 mult; hw_link_info = &hw->phy.link_info; i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); switch (hw_link_info->link_speed) { case I40E_LINK_SPEED_10GB: mult = I40E_PTP_10GB_INCVAL_MULT; break; case I40E_LINK_SPEED_5GB: mult = I40E_PTP_5GB_INCVAL_MULT; break; case I40E_LINK_SPEED_1GB: mult = I40E_PTP_1GB_INCVAL_MULT; break; case I40E_LINK_SPEED_100MB: { static int warn_once; if (!warn_once) { dev_warn(&pf->pdev->dev, "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n"); warn_once++; } mult = 0; break; } case I40E_LINK_SPEED_40GB: default: mult = 1; break; } /* The increment value is calculated by taking the base 40GbE incvalue * and multiplying it by a factor based on the link speed. */ incval = I40E_PTP_40GB_INCVAL * mult; /* Write the new increment value into the increment register. The * hardware will not update the clock until both registers have been * written. */ wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF); wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); /* Update the base adjustement value. */ WRITE_ONCE(pf->ptp_adj_mult, mult); smp_mb(); /* Force the above update. */ } /** * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping * @pf: Board private structure * @ifr: ioctl data * * Obtain the current hardware timestamping settigs as requested. To do this, * keep a shadow copy of the timestamp settings rather than attempting to * deconstruct it from the registers. **/ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr) { struct hwtstamp_config *config = &pf->tstamp_config; if (!(pf->flags & I40E_FLAG_PTP)) return -EOPNOTSUPP; return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? -EFAULT : 0; } /** * i40e_ptp_free_pins - free memory used by PTP pins * @pf: Board private structure * * Release memory allocated for PTP pins. **/ static void i40e_ptp_free_pins(struct i40e_pf *pf) { if (i40e_is_ptp_pin_dev(&pf->hw)) { kfree(pf->ptp_pins); kfree(pf->ptp_caps.pin_config); pf->ptp_pins = NULL; } } /** * i40e_ptp_set_pin_hw - Set HW GPIO pin * @hw: pointer to the hardware structure * @pin: pin index * @state: pin state * * Set status of GPIO pin for external clock handling. 
**/ static void i40e_ptp_set_pin_hw(struct i40e_hw *hw, unsigned int pin, enum i40e_ptp_gpio_pin_state state) { switch (state) { case off: wr32(hw, I40E_GLGEN_GPIO_CTL(pin), 0); break; case in_A: wr32(hw, I40E_GLGEN_GPIO_CTL(pin), I40E_GLGEN_GPIO_CTL_PORT_0_IN_TIMESYNC_0); break; case in_B: wr32(hw, I40E_GLGEN_GPIO_CTL(pin), I40E_GLGEN_GPIO_CTL_PORT_1_IN_TIMESYNC_0); break; case out_A: wr32(hw, I40E_GLGEN_GPIO_CTL(pin), I40E_GLGEN_GPIO_CTL_PORT_0_OUT_TIMESYNC_1); break; case out_B: wr32(hw, I40E_GLGEN_GPIO_CTL(pin), I40E_GLGEN_GPIO_CTL_PORT_1_OUT_TIMESYNC_1); break; default: break; } } /** * i40e_ptp_set_led_hw - Set HW GPIO led * @hw: pointer to the hardware structure * @led: led index * @state: led state * * Set status of GPIO led for external clock handling. **/ static void i40e_ptp_set_led_hw(struct i40e_hw *hw, unsigned int led, enum i40e_ptp_led_pin_state state) { switch (state) { case low: wr32(hw, I40E_GLGEN_GPIO_SET, I40E_GLGEN_GPIO_SET_DRV_SDP_DATA | led); break; case high: wr32(hw, I40E_GLGEN_GPIO_SET, I40E_GLGEN_GPIO_SET_DRV_SDP_DATA | I40E_GLGEN_GPIO_SET_SDP_DATA_HI | led); break; default: break; } } /** * i40e_ptp_init_leds_hw - init LEDs * @hw: pointer to a hardware structure * * Set initial state of LEDs **/ static void i40e_ptp_init_leds_hw(struct i40e_hw *hw) { wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED2_0), I40E_GLGEN_GPIO_CTL_LED_INIT); wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED2_1), I40E_GLGEN_GPIO_CTL_LED_INIT); wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED3_0), I40E_GLGEN_GPIO_CTL_LED_INIT); wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED3_1), I40E_GLGEN_GPIO_CTL_LED_INIT); } /** * i40e_ptp_set_pins_hw - Set HW GPIO pins * @pf: Board private structure * * This function sets GPIO pins for PTP **/ static void i40e_ptp_set_pins_hw(struct i40e_pf *pf) { const struct i40e_ptp_pins_settings *pins = pf->ptp_pins; struct i40e_hw *hw = &pf->hw; /* pin must be disabled before it may be used */ i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, off); i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, off); i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, off); i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, pins->sdp3_2); i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, pins->sdp3_3); i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, pins->gpio_4); i40e_ptp_set_led_hw(hw, I40E_LED2_0, pins->led2_0); i40e_ptp_set_led_hw(hw, I40E_LED2_1, pins->led2_1); i40e_ptp_set_led_hw(hw, I40E_LED3_0, pins->led3_0); i40e_ptp_set_led_hw(hw, I40E_LED3_1, pins->led3_1); dev_info(&pf->pdev->dev, "PTP configuration set to: SDP3_2: %s, SDP3_3: %s, GPIO_4: %s.\n", i40e_ptp_gpio_pin_state2str[pins->sdp3_2], i40e_ptp_gpio_pin_state2str[pins->sdp3_3], i40e_ptp_gpio_pin_state2str[pins->gpio_4]); } /** * i40e_ptp_set_pins - set PTP pins in HW * @pf: Board private structure * @pins: PTP pins to be applied * * Validate and set PTP pins in HW for specific PF. * Return 0 on success or negative value on error. 
**/ static int i40e_ptp_set_pins(struct i40e_pf *pf, struct i40e_ptp_pins_settings *pins) { enum i40e_can_set_pins_t pin_caps = i40e_can_set_pins(pf); int i = 0; if (pin_caps == CANT_DO_PINS) return -EOPNOTSUPP; else if (pin_caps == CAN_DO_PINS) return 0; if (pins->sdp3_2 == invalid) pins->sdp3_2 = pf->ptp_pins->sdp3_2; if (pins->sdp3_3 == invalid) pins->sdp3_3 = pf->ptp_pins->sdp3_3; if (pins->gpio_4 == invalid) pins->gpio_4 = pf->ptp_pins->gpio_4; while (i40e_ptp_pin_led_allowed_states[i].sdp3_2 != end) { if (pins->sdp3_2 == i40e_ptp_pin_led_allowed_states[i].sdp3_2 && pins->sdp3_3 == i40e_ptp_pin_led_allowed_states[i].sdp3_3 && pins->gpio_4 == i40e_ptp_pin_led_allowed_states[i].gpio_4) { pins->led2_0 = i40e_ptp_pin_led_allowed_states[i].led2_0; pins->led2_1 = i40e_ptp_pin_led_allowed_states[i].led2_1; pins->led3_0 = i40e_ptp_pin_led_allowed_states[i].led3_0; pins->led3_1 = i40e_ptp_pin_led_allowed_states[i].led3_1; break; } i++; } if (i40e_ptp_pin_led_allowed_states[i].sdp3_2 == end) { dev_warn(&pf->pdev->dev, "Unsupported PTP pin configuration: SDP3_2: %s, SDP3_3: %s, GPIO_4: %s.\n", i40e_ptp_gpio_pin_state2str[pins->sdp3_2], i40e_ptp_gpio_pin_state2str[pins->sdp3_3], i40e_ptp_gpio_pin_state2str[pins->gpio_4]); return -EPERM; } memcpy(pf->ptp_pins, pins, sizeof(*pins)); i40e_ptp_set_pins_hw(pf); i40_ptp_reset_timing_events(pf); return 0; } /** * i40e_ptp_alloc_pins - allocate PTP pins structure * @pf: Board private structure * * allocate PTP pins structure **/ int i40e_ptp_alloc_pins(struct i40e_pf *pf) { if (!i40e_is_ptp_pin_dev(&pf->hw)) return 0; pf->ptp_pins = kzalloc(sizeof(struct i40e_ptp_pins_settings), GFP_KERNEL); if (!pf->ptp_pins) { dev_warn(&pf->pdev->dev, "Cannot allocate memory for PTP pins structure.\n"); return -ENOMEM; } pf->ptp_pins->sdp3_2 = off; pf->ptp_pins->sdp3_3 = off; pf->ptp_pins->gpio_4 = off; pf->ptp_pins->led2_0 = high; pf->ptp_pins->led2_1 = high; pf->ptp_pins->led3_0 = high; pf->ptp_pins->led3_1 = high; /* Use PF0 to set pins in HW. Return success for user space tools */ if (pf->hw.pf_id) return 0; i40e_ptp_init_leds_hw(&pf->hw); i40e_ptp_set_pins_hw(pf); return 0; } /** * i40e_ptp_set_timestamp_mode - setup hardware for requested timestamp mode * @pf: Board private structure * @config: hwtstamp settings requested or saved * * Control hardware registers to enter the specific mode requested by the * user. Also used during reset path to ensure that timestamp settings are * maintained. * * Note: modifies config in place, and may update the requested mode to be * more broad if the specific filter is not directly supported. **/ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, struct hwtstamp_config *config) { struct i40e_hw *hw = &pf->hw; u32 tsyntype, regval; /* Selects external trigger to cause event */ regval = rd32(hw, I40E_PRTTSYN_AUX_0(0)); /* Bit 17:16 is EVNTLVL, 01B rising edge */ regval &= 0; regval |= (1 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT); /* regval: 0001 0000 0000 0000 0000 */ wr32(hw, I40E_PRTTSYN_AUX_0(0), regval); /* Enabel interrupts */ regval = rd32(hw, I40E_PRTTSYN_CTL0); regval |= 1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT; wr32(hw, I40E_PRTTSYN_CTL0, regval); INIT_WORK(&pf->ptp_extts0_work, i40e_ptp_extts0_work); switch (config->tx_type) { case HWTSTAMP_TX_OFF: pf->ptp_tx = false; break; case HWTSTAMP_TX_ON: pf->ptp_tx = true; break; default: return -ERANGE; } switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: pf->ptp_rx = false; /* We set the type to V1, but do not enable UDP packet * recognition. 
In this way, we should be as close to * disabling PTP Rx timestamps as possible since V1 packets * are always UDP, since L2 packets are a V2 feature. */ tsyntype = I40E_PRTTSYN_CTL1_TSYNTYPE_V1; break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE)) return -ERANGE; pf->ptp_rx = true; tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK | I40E_PRTTSYN_CTL1_TSYNTYPE_V1 | I40E_PRTTSYN_CTL1_UDP_ENA_MASK; config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE)) return -ERANGE; fallthrough; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: pf->ptp_rx = true; tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK | I40E_PRTTSYN_CTL1_TSYNTYPE_V2; if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE) { tsyntype |= I40E_PRTTSYN_CTL1_UDP_ENA_MASK; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; } else { config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; } break; case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_ALL: default: return -ERANGE; } /* Clear out all 1588-related registers to clear and unlatch them. */ spin_lock_bh(&pf->ptp_rx_lock); rd32(hw, I40E_PRTTSYN_STAT_0); rd32(hw, I40E_PRTTSYN_TXTIME_H); rd32(hw, I40E_PRTTSYN_RXTIME_H(0)); rd32(hw, I40E_PRTTSYN_RXTIME_H(1)); rd32(hw, I40E_PRTTSYN_RXTIME_H(2)); rd32(hw, I40E_PRTTSYN_RXTIME_H(3)); pf->latch_event_flags = 0; spin_unlock_bh(&pf->ptp_rx_lock); /* Enable/disable the Tx timestamp interrupt based on user input. */ regval = rd32(hw, I40E_PRTTSYN_CTL0); if (pf->ptp_tx) regval |= I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK; else regval &= ~I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK; wr32(hw, I40E_PRTTSYN_CTL0, regval); regval = rd32(hw, I40E_PFINT_ICR0_ENA); if (pf->ptp_tx) regval |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; else regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, regval); /* Although there is no simple on/off switch for Rx, we "disable" Rx * timestamps by setting to V1 only mode and clear the UDP * recognition. This ought to disable all PTP Rx timestamps as V1 * packets are always over UDP. Note that software is configured to * ignore Rx timestamps via the pf->ptp_rx flag. */ regval = rd32(hw, I40E_PRTTSYN_CTL1); /* clear everything but the enable bit */ regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK; /* now enable bits for desired Rx timestamps */ regval |= tsyntype; wr32(hw, I40E_PRTTSYN_CTL1, regval); return 0; } /** * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping * @pf: Board private structure * @ifr: ioctl data * * Respond to the user filter requests and make the appropriate hardware * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping * logic, so keep track in software of whether to indicate these timestamps * or not. * * It is permissible to "upgrade" the user request to a broader filter, as long * as the user receives the timestamps they care about and the user is notified * the filter has been broadened. 
**/ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr) { struct hwtstamp_config config; int err; if (!(pf->flags & I40E_FLAG_PTP)) return -EOPNOTSUPP; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; err = i40e_ptp_set_timestamp_mode(pf, &config); if (err) return err; /* save these settings for future reference */ pf->tstamp_config = config; return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; } /** * i40e_init_pin_config - initialize pins. * @pf: private board structure * * Initialize pins for external clock source. * Return 0 on success or error code on failure. **/ static int i40e_init_pin_config(struct i40e_pf *pf) { int i; pf->ptp_caps.n_pins = 3; pf->ptp_caps.n_ext_ts = 2; pf->ptp_caps.pps = 1; pf->ptp_caps.n_per_out = 2; pf->ptp_caps.pin_config = kcalloc(pf->ptp_caps.n_pins, sizeof(*pf->ptp_caps.pin_config), GFP_KERNEL); if (!pf->ptp_caps.pin_config) return -ENOMEM; for (i = 0; i < pf->ptp_caps.n_pins; i++) { snprintf(pf->ptp_caps.pin_config[i].name, sizeof(pf->ptp_caps.pin_config[i].name), "%s", sdp_desc[i].name); pf->ptp_caps.pin_config[i].index = sdp_desc[i].index; pf->ptp_caps.pin_config[i].func = PTP_PF_NONE; pf->ptp_caps.pin_config[i].chan = sdp_desc[i].chan; } pf->ptp_caps.verify = i40e_ptp_verify; pf->ptp_caps.enable = i40e_ptp_feature_enable; pf->ptp_caps.pps = 1; return 0; } /** * i40e_ptp_create_clock - Create PTP clock device for userspace * @pf: Board private structure * * This function creates a new PTP clock device. It only creates one if we * don't already have one, so it is safe to call. Will return error if it * can't create one, but success if we already have a device. Should be used * by i40e_ptp_init to create clock initially, and prevent global resets from * creating new clock devices. **/ static long i40e_ptp_create_clock(struct i40e_pf *pf) { /* no need to create a clock device if we already have one */ if (!IS_ERR_OR_NULL(pf->ptp_clock)) return 0; strscpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name) - 1); pf->ptp_caps.owner = THIS_MODULE; pf->ptp_caps.max_adj = 999999999; pf->ptp_caps.adjfine = i40e_ptp_adjfine; pf->ptp_caps.adjtime = i40e_ptp_adjtime; pf->ptp_caps.gettimex64 = i40e_ptp_gettimex; pf->ptp_caps.settime64 = i40e_ptp_settime; if (i40e_is_ptp_pin_dev(&pf->hw)) { int err = i40e_init_pin_config(pf); if (err) return err; } /* Attempt to register the clock before enabling the hardware. */ pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev); if (IS_ERR(pf->ptp_clock)) return PTR_ERR(pf->ptp_clock); /* clear the hwtstamp settings here during clock create, instead of * during regular init, so that we can maintain settings across a * reset or suspend. */ pf->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF; /* Set the previous "reset" time to the current Kernel clock time */ ktime_get_real_ts64(&pf->ptp_prev_hw_time); pf->ptp_reset_start = ktime_get(); return 0; } /** * i40e_ptp_save_hw_time - Save the current PTP time as ptp_prev_hw_time * @pf: Board private structure * * Read the current PTP time and save it into pf->ptp_prev_hw_time. This should * be called at the end of preparing to reset, just before hardware reset * occurs, in order to preserve the PTP time as close as possible across * resets. 
*/ void i40e_ptp_save_hw_time(struct i40e_pf *pf) { /* don't try to access the PTP clock if it's not enabled */ if (!(pf->flags & I40E_FLAG_PTP)) return; i40e_ptp_gettimex(&pf->ptp_caps, &pf->ptp_prev_hw_time, NULL); /* Get a monotonic starting time for this reset */ pf->ptp_reset_start = ktime_get(); } /** * i40e_ptp_restore_hw_time - Restore the ptp_prev_hw_time + delta to PTP regs * @pf: Board private structure * * Restore the PTP hardware clock registers. We previously cached the PTP * hardware time as pf->ptp_prev_hw_time. To be as accurate as possible, * update this value based on the time delta since the time was saved, using * CLOCK_MONOTONIC (via ktime_get()) to calculate the time difference. * * This ensures that the hardware clock is restored to nearly what it should * have been if a reset had not occurred. */ void i40e_ptp_restore_hw_time(struct i40e_pf *pf) { ktime_t delta = ktime_sub(ktime_get(), pf->ptp_reset_start); /* Update the previous HW time with the ktime delta */ timespec64_add_ns(&pf->ptp_prev_hw_time, ktime_to_ns(delta)); /* Restore the hardware clock registers */ i40e_ptp_settime(&pf->ptp_caps, &pf->ptp_prev_hw_time); } /** * i40e_ptp_init - Initialize the 1588 support after device probe or reset * @pf: Board private structure * * This function sets device up for 1588 support. The first time it is run, it * will create a PHC clock device. It does not create a clock device if one * already exists. It also reconfigures the device after a reset. * * The first time a clock is created, i40e_ptp_create_clock will set * pf->ptp_prev_hw_time to the current system time. During resets, it is * expected that this timespec will be set to the last known PTP clock time, * in order to preserve the clock time as close as possible across a reset. **/ void i40e_ptp_init(struct i40e_pf *pf) { struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; struct i40e_hw *hw = &pf->hw; u32 pf_id; long err; /* Only one PF is assigned to control 1588 logic per port. Do not * enable any support for PFs not assigned via PRTTSYN_CTL0.PF_ID */ pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >> I40E_PRTTSYN_CTL0_PF_ID_SHIFT; if (hw->pf_id != pf_id) { pf->flags &= ~I40E_FLAG_PTP; dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n", __func__, netdev->name); return; } mutex_init(&pf->tmreg_lock); spin_lock_init(&pf->ptp_rx_lock); /* ensure we have a clock device */ err = i40e_ptp_create_clock(pf); if (err) { pf->ptp_clock = NULL; dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n", __func__); } else if (pf->ptp_clock) { u32 regval; if (pf->hw.debug_mask & I40E_DEBUG_LAN) dev_info(&pf->pdev->dev, "PHC enabled\n"); pf->flags |= I40E_FLAG_PTP; /* Ensure the clocks are running. */ regval = rd32(hw, I40E_PRTTSYN_CTL0); regval |= I40E_PRTTSYN_CTL0_TSYNENA_MASK; wr32(hw, I40E_PRTTSYN_CTL0, regval); regval = rd32(hw, I40E_PRTTSYN_CTL1); regval |= I40E_PRTTSYN_CTL1_TSYNENA_MASK; wr32(hw, I40E_PRTTSYN_CTL1, regval); /* Set the increment value per clock tick. */ i40e_ptp_set_increment(pf); /* reset timestamping mode */ i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config); /* Restore the clock time based on last known value */ i40e_ptp_restore_hw_time(pf); } i40e_ptp_set_1pps_signal_hw(pf); } /** * i40e_ptp_stop - Disable the driver/hardware support and unregister the PHC * @pf: Board private structure * * This function handles the cleanup work required from the initialization by * clearing out the important information and unregistering the PHC. 
**/ void i40e_ptp_stop(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 regval; pf->flags &= ~I40E_FLAG_PTP; pf->ptp_tx = false; pf->ptp_rx = false; if (pf->ptp_tx_skb) { struct sk_buff *skb = pf->ptp_tx_skb; pf->ptp_tx_skb = NULL; clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); dev_kfree_skb_any(skb); } if (pf->ptp_clock) { ptp_clock_unregister(pf->ptp_clock); pf->ptp_clock = NULL; dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__, pf->vsi[pf->lan_vsi]->netdev->name); } if (i40e_is_ptp_pin_dev(&pf->hw)) { i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, off); i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, off); i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, off); } regval = rd32(hw, I40E_PRTTSYN_AUX_0(0)); regval &= ~I40E_PRTTSYN_AUX_0_PTPFLAG_MASK; wr32(hw, I40E_PRTTSYN_AUX_0(0), regval); /* Disable interrupts */ regval = rd32(hw, I40E_PRTTSYN_CTL0); regval &= ~I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK; wr32(hw, I40E_PRTTSYN_CTL0, regval); i40e_ptp_free_pins(pf); }
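The SIOCSHWTSTAMP path handled above by i40e_ptp_set_ts_config() is driven from user space through the standard struct hwtstamp_config ioctl. The following is a minimal user-space sketch, appended here only as an illustration and not part of the driver; the interface name "eth0" and the chosen rx_filter are assumptions, and the driver may broaden the requested filter before writing the accepted configuration back into the struct.
/* Illustrative sketch only -- not part of i40e. Assumes an interface
 * named "eth0"; adjust for the system under test.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		/* The driver may broaden this (e.g. to
		 * HWTSTAMP_FILTER_PTP_V2_EVENT); the accepted filter is
		 * written back into cfg by the ioctl.
		 */
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
	};
	struct ifreq ifr;
	int fd, ret = 0;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		ret = 1;
	} else {
		printf("accepted tx_type=%d rx_filter=%d\n",
		       cfg.tx_type, cfg.rx_filter);
	}

	close(fd);
	return ret;
}
Reading the current settings back later uses SIOCGHWTSTAMP with the same struct ifreq layout.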
linux-master
drivers/net/ethernet/intel/i40e/i40e_ptp.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifdef CONFIG_DEBUG_FS #include <linux/fs.h> #include <linux/debugfs.h> #include "i40e.h" static struct dentry *i40e_dbg_root; enum ring_type { RING_TYPE_RX, RING_TYPE_TX, RING_TYPE_XDP }; /** * i40e_dbg_find_vsi - searches for the vsi with the given seid * @pf: the PF structure to search for the vsi * @seid: seid of the vsi it is searching for **/ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) { int i; if (seid < 0) dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); else for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i] && (pf->vsi[i]->seid == seid)) return pf->vsi[i]; return NULL; } /** * i40e_dbg_find_veb - searches for the veb with the given seid * @pf: the PF structure to search for the veb * @seid: seid of the veb it is searching for **/ static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) { int i; for (i = 0; i < I40E_MAX_VEB; i++) if (pf->veb[i] && pf->veb[i]->seid == seid) return pf->veb[i]; return NULL; } /************************************************************** * command * The command entry in debugfs is for giving the driver commands * to be executed - these may be for changing the internal switch * setup, adding or removing filters, or other things. Many of * these will be useful for some forms of unit testing. **************************************************************/ static char i40e_dbg_command_buf[256] = ""; /** * i40e_dbg_command_read - read for command datum * @filp: the opened file * @buffer: where to write the data for the user to read * @count: the size of the user's buffer * @ppos: file position offset **/ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { struct i40e_pf *pf = filp->private_data; int bytes_not_copied; int buf_size = 256; char *buf; int len; /* don't allow partial reads */ if (*ppos != 0) return 0; if (count < buf_size) return -ENOSPC; buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOSPC; len = snprintf(buf, buf_size, "%s: %s\n", pf->vsi[pf->lan_vsi]->netdev->name, i40e_dbg_command_buf); bytes_not_copied = copy_to_user(buffer, buf, len); kfree(buf); if (bytes_not_copied) return -EFAULT; *ppos = len; return len; } static char *i40e_filter_state_string[] = { "INVALID", "NEW", "ACTIVE", "FAILED", "REMOVE", }; /** * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum * @pf: the i40e_pf created in command write * @seid: the seid the user put in **/ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) { struct rtnl_link_stats64 *nstat; struct i40e_mac_filter *f; struct i40e_vsi *vsi; int i, bkt; vsi = i40e_dbg_find_vsi(pf, seid); if (!vsi) { dev_info(&pf->pdev->dev, "dump %d: seid not found\n", seid); return; } dev_info(&pf->pdev->dev, "vsi seid %d\n", seid); if (vsi->netdev) { struct net_device *nd = vsi->netdev; dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n", nd->name, nd->state, nd->flags); dev_info(&pf->pdev->dev, " features = 0x%08lx\n", (unsigned long int)nd->features); dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n", (unsigned long int)nd->hw_features); dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n", (unsigned long int)nd->vlan_features); } dev_info(&pf->pdev->dev, " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags); for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++) 
dev_info(&pf->pdev->dev, " state[%d] = %08lx\n", i, vsi->state[i]); if (vsi == pf->vsi[pf->lan_vsi]) dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", pf->hw.mac.addr, pf->hw.mac.san_addr, pf->hw.mac.port_addr); hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { dev_info(&pf->pdev->dev, " mac_filter_hash: %pM vid=%d, state %s\n", f->macaddr, f->vlan, i40e_filter_state_string[f->state]); } dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n", vsi->active_filters, vsi->promisc_threshold, (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ? "ON" : "OFF")); nstat = i40e_get_vsi_stats_struct(vsi); dev_info(&pf->pdev->dev, " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n", (unsigned long int)nstat->rx_packets, (unsigned long int)nstat->rx_bytes, (unsigned long int)nstat->rx_errors, (unsigned long int)nstat->rx_dropped); dev_info(&pf->pdev->dev, " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n", (unsigned long int)nstat->tx_packets, (unsigned long int)nstat->tx_bytes, (unsigned long int)nstat->tx_errors, (unsigned long int)nstat->tx_dropped); dev_info(&pf->pdev->dev, " net_stats: multicast = %lu, collisions = %lu\n", (unsigned long int)nstat->multicast, (unsigned long int)nstat->collisions); dev_info(&pf->pdev->dev, " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n", (unsigned long int)nstat->rx_length_errors, (unsigned long int)nstat->rx_over_errors, (unsigned long int)nstat->rx_crc_errors); dev_info(&pf->pdev->dev, " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n", (unsigned long int)nstat->rx_frame_errors, (unsigned long int)nstat->rx_fifo_errors, (unsigned long int)nstat->rx_missed_errors); dev_info(&pf->pdev->dev, " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n", (unsigned long int)nstat->tx_aborted_errors, (unsigned long int)nstat->tx_carrier_errors, (unsigned long int)nstat->tx_fifo_errors); dev_info(&pf->pdev->dev, " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n", (unsigned long int)nstat->tx_heartbeat_errors, (unsigned long int)nstat->tx_window_errors); dev_info(&pf->pdev->dev, " net_stats: rx_compressed = %lu, tx_compressed = %lu\n", (unsigned long int)nstat->rx_compressed, (unsigned long int)nstat->tx_compressed); dev_info(&pf->pdev->dev, " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n", (unsigned long int)vsi->net_stats_offsets.rx_packets, (unsigned long int)vsi->net_stats_offsets.rx_bytes, (unsigned long int)vsi->net_stats_offsets.rx_errors, (unsigned long int)vsi->net_stats_offsets.rx_dropped); dev_info(&pf->pdev->dev, " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n", (unsigned long int)vsi->net_stats_offsets.tx_packets, (unsigned long int)vsi->net_stats_offsets.tx_bytes, (unsigned long int)vsi->net_stats_offsets.tx_errors, (unsigned long int)vsi->net_stats_offsets.tx_dropped); dev_info(&pf->pdev->dev, " net_stats_offsets: multicast = %lu, collisions = %lu\n", (unsigned long int)vsi->net_stats_offsets.multicast, (unsigned long int)vsi->net_stats_offsets.collisions); dev_info(&pf->pdev->dev, " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n", (unsigned long int)vsi->net_stats_offsets.rx_length_errors, (unsigned long int)vsi->net_stats_offsets.rx_over_errors, (unsigned long 
int)vsi->net_stats_offsets.rx_crc_errors); dev_info(&pf->pdev->dev, " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n", (unsigned long int)vsi->net_stats_offsets.rx_frame_errors, (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors, (unsigned long int)vsi->net_stats_offsets.rx_missed_errors); dev_info(&pf->pdev->dev, " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n", (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors, (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors, (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors); dev_info(&pf->pdev->dev, " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n", (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors, (unsigned long int)vsi->net_stats_offsets.tx_window_errors); dev_info(&pf->pdev->dev, " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n", (unsigned long int)vsi->net_stats_offsets.rx_compressed, (unsigned long int)vsi->net_stats_offsets.tx_compressed); dev_info(&pf->pdev->dev, " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n", vsi->tx_restart, vsi->tx_busy, vsi->rx_buf_failed, vsi->rx_page_failed); rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]); if (!rx_ring) continue; dev_info(&pf->pdev->dev, " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n", i, *rx_ring->state, rx_ring->queue_index, rx_ring->reg_idx); dev_info(&pf->pdev->dev, " rx_rings[%i]: rx_buf_len = %d\n", i, rx_ring->rx_buf_len); dev_info(&pf->pdev->dev, " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n", i, rx_ring->next_to_use, rx_ring->next_to_clean, rx_ring->ring_active); dev_info(&pf->pdev->dev, " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n", i, rx_ring->stats.packets, rx_ring->stats.bytes, rx_ring->rx_stats.non_eop_descs); dev_info(&pf->pdev->dev, " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n", i, rx_ring->rx_stats.alloc_page_failed, rx_ring->rx_stats.alloc_buff_failed); dev_info(&pf->pdev->dev, " rx_rings[%i]: rx_stats: realloc_count = 0, page_reuse_count = %lld\n", i, rx_ring->rx_stats.page_reuse_count); dev_info(&pf->pdev->dev, " rx_rings[%i]: size = %i\n", i, rx_ring->size); dev_info(&pf->pdev->dev, " rx_rings[%i]: itr_setting = %d (%s)\n", i, rx_ring->itr_setting, ITR_IS_DYNAMIC(rx_ring->itr_setting) ? 
"dynamic" : "fixed"); } for (i = 0; i < vsi->num_queue_pairs; i++) { struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]); if (!tx_ring) continue; dev_info(&pf->pdev->dev, " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n", i, *tx_ring->state, tx_ring->queue_index, tx_ring->reg_idx); dev_info(&pf->pdev->dev, " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n", i, tx_ring->next_to_use, tx_ring->next_to_clean, tx_ring->ring_active); dev_info(&pf->pdev->dev, " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n", i, tx_ring->stats.packets, tx_ring->stats.bytes, tx_ring->tx_stats.restart_queue); dev_info(&pf->pdev->dev, " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n", i, tx_ring->tx_stats.tx_busy, tx_ring->tx_stats.tx_done_old, tx_ring->tx_stats.tx_stopped); dev_info(&pf->pdev->dev, " tx_rings[%i]: size = %i\n", i, tx_ring->size); dev_info(&pf->pdev->dev, " tx_rings[%i]: DCB tc = %d\n", i, tx_ring->dcb_tc); dev_info(&pf->pdev->dev, " tx_rings[%i]: itr_setting = %d (%s)\n", i, tx_ring->itr_setting, ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed"); } if (i40e_enabled_xdp_vsi(vsi)) { for (i = 0; i < vsi->num_queue_pairs; i++) { struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]); if (!xdp_ring) continue; dev_info(&pf->pdev->dev, " xdp_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n", i, *xdp_ring->state, xdp_ring->queue_index, xdp_ring->reg_idx); dev_info(&pf->pdev->dev, " xdp_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n", i, xdp_ring->next_to_use, xdp_ring->next_to_clean, xdp_ring->ring_active); dev_info(&pf->pdev->dev, " xdp_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n", i, xdp_ring->stats.packets, xdp_ring->stats.bytes, xdp_ring->tx_stats.restart_queue); dev_info(&pf->pdev->dev, " xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n", i, xdp_ring->tx_stats.tx_busy, xdp_ring->tx_stats.tx_done_old); dev_info(&pf->pdev->dev, " xdp_rings[%i]: size = %i\n", i, xdp_ring->size); dev_info(&pf->pdev->dev, " xdp_rings[%i]: DCB tc = %d\n", i, xdp_ring->dcb_tc); dev_info(&pf->pdev->dev, " xdp_rings[%i]: itr_setting = %d (%s)\n", i, xdp_ring->itr_setting, ITR_IS_DYNAMIC(xdp_ring->itr_setting) ? 
"dynamic" : "fixed"); } } rcu_read_unlock(); dev_info(&pf->pdev->dev, " work_limit = %d\n", vsi->work_limit); dev_info(&pf->pdev->dev, " max_frame = %d, rx_buf_len = %d dtype = %d\n", vsi->max_frame, vsi->rx_buf_len, 0); dev_info(&pf->pdev->dev, " num_q_vectors = %i, base_vector = %i\n", vsi->num_q_vectors, vsi->base_vector); dev_info(&pf->pdev->dev, " seid = %d, id = %d, uplink_seid = %d\n", vsi->seid, vsi->id, vsi->uplink_seid); dev_info(&pf->pdev->dev, " base_queue = %d, num_queue_pairs = %d, num_tx_desc = %d, num_rx_desc = %d\n", vsi->base_queue, vsi->num_queue_pairs, vsi->num_tx_desc, vsi->num_rx_desc); dev_info(&pf->pdev->dev, " type = %i\n", vsi->type); if (vsi->type == I40E_VSI_SRIOV) dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id); dev_info(&pf->pdev->dev, " info: valid_sections = 0x%04x, switch_id = 0x%04x\n", vsi->info.valid_sections, vsi->info.switch_id); dev_info(&pf->pdev->dev, " info: sw_reserved[] = 0x%02x 0x%02x\n", vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]); dev_info(&pf->pdev->dev, " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n", vsi->info.sec_flags, vsi->info.sec_reserved); dev_info(&pf->pdev->dev, " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n", vsi->info.pvid, vsi->info.fcoe_pvid, vsi->info.port_vlan_flags); dev_info(&pf->pdev->dev, " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n", vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1], vsi->info.pvlan_reserved[2]); dev_info(&pf->pdev->dev, " info: ingress_table = 0x%08x, egress_table = 0x%08x\n", vsi->info.ingress_table, vsi->info.egress_table); dev_info(&pf->pdev->dev, " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n", vsi->info.cas_pv_tag, vsi->info.cas_pv_flags, vsi->info.cas_pv_reserved); dev_info(&pf->pdev->dev, " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", vsi->info.queue_mapping[0], vsi->info.queue_mapping[1], vsi->info.queue_mapping[2], vsi->info.queue_mapping[3], vsi->info.queue_mapping[4], vsi->info.queue_mapping[5], vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]); dev_info(&pf->pdev->dev, " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", vsi->info.queue_mapping[8], vsi->info.queue_mapping[9], vsi->info.queue_mapping[10], vsi->info.queue_mapping[11], vsi->info.queue_mapping[12], vsi->info.queue_mapping[13], vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]); dev_info(&pf->pdev->dev, " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", vsi->info.tc_mapping[0], vsi->info.tc_mapping[1], vsi->info.tc_mapping[2], vsi->info.tc_mapping[3], vsi->info.tc_mapping[4], vsi->info.tc_mapping[5], vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]); dev_info(&pf->pdev->dev, " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n", vsi->info.queueing_opt_flags, vsi->info.queueing_opt_reserved[0], vsi->info.queueing_opt_reserved[1], vsi->info.queueing_opt_reserved[2]); dev_info(&pf->pdev->dev, " info: up_enable_bits = 0x%02x\n", vsi->info.up_enable_bits); dev_info(&pf->pdev->dev, " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n", vsi->info.sched_reserved, vsi->info.outer_up_table); dev_info(&pf->pdev->dev, " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n", vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1], vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3], vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5], vsi->info.cmd_reserved[6], 
vsi->info.cmd_reserved[7]); dev_info(&pf->pdev->dev, " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", vsi->info.qs_handle[0], vsi->info.qs_handle[1], vsi->info.qs_handle[2], vsi->info.qs_handle[3], vsi->info.qs_handle[4], vsi->info.qs_handle[5], vsi->info.qs_handle[6], vsi->info.qs_handle[7]); dev_info(&pf->pdev->dev, " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n", vsi->info.stat_counter_idx, vsi->info.sched_id); dev_info(&pf->pdev->dev, " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", vsi->info.resp_reserved[0], vsi->info.resp_reserved[1], vsi->info.resp_reserved[2], vsi->info.resp_reserved[3], vsi->info.resp_reserved[4], vsi->info.resp_reserved[5], vsi->info.resp_reserved[6], vsi->info.resp_reserved[7], vsi->info.resp_reserved[8], vsi->info.resp_reserved[9], vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]); dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx); dev_info(&pf->pdev->dev, " tc_config: numtc = %d, enabled_tc = 0x%x\n", vsi->tc_config.numtc, vsi->tc_config.enabled_tc); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { dev_info(&pf->pdev->dev, " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n", i, vsi->tc_config.tc_info[i].qoffset, vsi->tc_config.tc_info[i].qcount, vsi->tc_config.tc_info[i].netdev_tc); } dev_info(&pf->pdev->dev, " bw: bw_limit = %d, bw_max_quanta = %d\n", vsi->bw_limit, vsi->bw_max_quanta); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { dev_info(&pf->pdev->dev, " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n", i, vsi->bw_ets_share_credits[i], vsi->bw_ets_limit_credits[i], vsi->bw_ets_max_quanta[i]); } } /** * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum * @pf: the i40e_pf created in command write **/ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf) { struct i40e_adminq_ring *ring; struct i40e_hw *hw = &pf->hw; char hdr[32]; int i; snprintf(hdr, sizeof(hdr), "%s %s: ", dev_driver_string(&pf->pdev->dev), dev_name(&pf->pdev->dev)); /* first the send (command) ring, then the receive (event) ring */ dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n"); ring = &(hw->aq.asq); for (i = 0; i < ring->count; i++) { struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i); dev_info(&pf->pdev->dev, " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n", i, d->flags, d->opcode, d->datalen, d->retval, d->cookie_high, d->cookie_low); print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE, 16, 1, d->params.raw, 16, 0); } dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n"); ring = &(hw->aq.arq); for (i = 0; i < ring->count; i++) { struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i); dev_info(&pf->pdev->dev, " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n", i, d->flags, d->opcode, d->datalen, d->retval, d->cookie_high, d->cookie_low); print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE, 16, 1, d->params.raw, 16, 0); } } /** * i40e_dbg_dump_desc - handles dump desc write into command datum * @cnt: number of arguments that the user supplied * @vsi_seid: vsi id entered by user * @ring_id: ring id entered by user * @desc_n: descriptor number entered by user * @pf: the i40e_pf created in command write * @type: enum describing whether ring is RX, TX or XDP **/ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, struct i40e_pf *pf, enum ring_type type) { bool is_rx_ring = type == RING_TYPE_RX; struct i40e_tx_desc *txd; union 
i40e_rx_desc *rxd; struct i40e_ring *ring; struct i40e_vsi *vsi; int i; vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid); return; } if (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_FDIR && vsi->type != I40E_VSI_VMDQ2) { dev_info(&pf->pdev->dev, "vsi %d type %d descriptor rings not available\n", vsi_seid, vsi->type); return; } if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) { dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid); return; } if (ring_id >= vsi->num_queue_pairs || ring_id < 0) { dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id); return; } if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) { dev_info(&pf->pdev->dev, "descriptor rings have not been allocated for vsi %d\n", vsi_seid); return; } switch (type) { case RING_TYPE_RX: ring = kmemdup(vsi->rx_rings[ring_id], sizeof(*ring), GFP_KERNEL); break; case RING_TYPE_TX: ring = kmemdup(vsi->tx_rings[ring_id], sizeof(*ring), GFP_KERNEL); break; case RING_TYPE_XDP: ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL); break; default: ring = NULL; break; } if (!ring) return; if (cnt == 2) { switch (type) { case RING_TYPE_RX: dev_info(&pf->pdev->dev, "VSI = %02i Rx ring = %02i\n", vsi_seid, ring_id); break; case RING_TYPE_TX: dev_info(&pf->pdev->dev, "VSI = %02i Tx ring = %02i\n", vsi_seid, ring_id); break; case RING_TYPE_XDP: dev_info(&pf->pdev->dev, "VSI = %02i XDP ring = %02i\n", vsi_seid, ring_id); break; } for (i = 0; i < ring->count; i++) { if (!is_rx_ring) { txd = I40E_TX_DESC(ring, i); dev_info(&pf->pdev->dev, " d[%03x] = 0x%016llx 0x%016llx\n", i, txd->buffer_addr, txd->cmd_type_offset_bsz); } else { rxd = I40E_RX_DESC(ring, i); dev_info(&pf->pdev->dev, " d[%03x] = 0x%016llx 0x%016llx\n", i, rxd->read.pkt_addr, rxd->read.hdr_addr); } } } else if (cnt == 3) { if (desc_n >= ring->count || desc_n < 0) { dev_info(&pf->pdev->dev, "descriptor %d not found\n", desc_n); goto out; } if (!is_rx_ring) { txd = I40E_TX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, txd->buffer_addr, txd->cmd_type_offset_bsz); } else { rxd = I40E_RX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, rxd->read.pkt_addr, rxd->read.hdr_addr); } } else { dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n"); } out: kfree(ring); } /** * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum * @pf: the i40e_pf created in command write **/ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf) { int i; for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i]) dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", i, pf->vsi[i]->seid); } /** * i40e_dbg_dump_eth_stats - handles dump stats write into command datum * @pf: the i40e_pf created in command write * @estats: the eth stats structure to be dumped **/ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf, struct i40e_eth_stats *estats) { dev_info(&pf->pdev->dev, " ethstats:\n"); dev_info(&pf->pdev->dev, " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n", estats->rx_bytes, estats->rx_unicast, estats->rx_multicast); dev_info(&pf->pdev->dev, " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n", estats->rx_broadcast, estats->rx_discards); dev_info(&pf->pdev->dev, " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n", estats->rx_unknown_protocol, estats->tx_bytes); dev_info(&pf->pdev->dev, " 
tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n", estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast); dev_info(&pf->pdev->dev, " tx_discards = \t%lld \ttx_errors = \t\t%lld\n", estats->tx_discards, estats->tx_errors); } /** * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb * @pf: the i40e_pf created in command write * @seid: the seid the user put in **/ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid) { struct i40e_veb *veb; veb = i40e_dbg_find_veb(pf, seid); if (!veb) { dev_info(&pf->pdev->dev, "can't find veb %d\n", seid); return; } dev_info(&pf->pdev->dev, "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n", veb->idx, veb->veb_idx, veb->stats_idx, veb->seid, veb->uplink_seid, veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); i40e_dbg_dump_eth_stats(pf, &veb->stats); } /** * i40e_dbg_dump_veb_all - dumps all known veb's stats * @pf: the i40e_pf created in command write **/ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf) { struct i40e_veb *veb; int i; for (i = 0; i < I40E_MAX_VEB; i++) { veb = pf->veb[i]; if (veb) i40e_dbg_dump_veb_seid(pf, veb->seid); } } /** * i40e_dbg_dump_vf - dump VF info * @pf: the i40e_pf created in command write * @vf_id: the vf_id from the user **/ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id) { struct i40e_vf *vf; struct i40e_vsi *vsi; if (!pf->num_alloc_vfs) { dev_info(&pf->pdev->dev, "no VFs allocated\n"); } else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) { vf = &pf->vf[vf_id]; vsi = pf->vsi[vf->lan_vsi_idx]; dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n", vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs); dev_info(&pf->pdev->dev, " num MDD=%lld\n", vf->num_mdd_events); } else { dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id); } } /** * i40e_dbg_dump_vf_all - dump VF info for all VFs * @pf: the i40e_pf created in command write **/ static void i40e_dbg_dump_vf_all(struct i40e_pf *pf) { int i; if (!pf->num_alloc_vfs) dev_info(&pf->pdev->dev, "no VFs enabled!\n"); else for (i = 0; i < pf->num_alloc_vfs; i++) i40e_dbg_dump_vf(pf, i); } /** * i40e_dbg_command_write - write into command datum * @filp: the opened file * @buffer: where to find the user's data * @count: the length of the user's data * @ppos: file position offset **/ static ssize_t i40e_dbg_command_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { struct i40e_pf *pf = filp->private_data; char *cmd_buf, *cmd_buf_tmp; int bytes_not_copied; struct i40e_vsi *vsi; int vsi_seid; int veb_seid; int vf_id; int cnt; /* don't allow partial writes */ if (*ppos != 0) return 0; cmd_buf = kzalloc(count + 1, GFP_KERNEL); if (!cmd_buf) return count; bytes_not_copied = copy_from_user(cmd_buf, buffer, count); if (bytes_not_copied) { kfree(cmd_buf); return -EFAULT; } cmd_buf[count] = '\0'; cmd_buf_tmp = strchr(cmd_buf, '\n'); if (cmd_buf_tmp) { *cmd_buf_tmp = '\0'; count = cmd_buf_tmp - cmd_buf + 1; } if (strncmp(cmd_buf, "add vsi", 7) == 0) { vsi_seid = -1; cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid); if (cnt == 0) { /* default to PF VSI */ vsi_seid = pf->vsi[pf->lan_vsi]->seid; } else if (vsi_seid < 0) { dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n", vsi_seid); goto command_write_done; } /* By default we are in VEPA mode, if this is the first VF/VMDq * VSI to be added switch to VEB mode. 
*/ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); } vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); if (vsi) dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n", vsi->seid, vsi->uplink_seid); else dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf); } else if (strncmp(cmd_buf, "del vsi", 7) == 0) { cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid); if (cnt != 1) { dev_info(&pf->pdev->dev, "del vsi: bad command string, cnt=%d\n", cnt); goto command_write_done; } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n", vsi_seid); goto command_write_done; } dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid); i40e_vsi_release(vsi); } else if (strncmp(cmd_buf, "add relay", 9) == 0) { struct i40e_veb *veb; int uplink_seid, i; cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid); if (cnt != 2) { dev_info(&pf->pdev->dev, "add relay: bad command string, cnt=%d\n", cnt); goto command_write_done; } else if (uplink_seid < 0) { dev_info(&pf->pdev->dev, "add relay %d: bad uplink seid\n", uplink_seid); goto command_write_done; } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "add relay: VSI %d not found\n", vsi_seid); goto command_write_done; } for (i = 0; i < I40E_MAX_VEB; i++) if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) break; if (i >= I40E_MAX_VEB && uplink_seid != 0 && uplink_seid != pf->mac_seid) { dev_info(&pf->pdev->dev, "add relay: relay uplink %d not found\n", uplink_seid); goto command_write_done; } veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, vsi->tc_config.enabled_tc); if (veb) dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid); else dev_info(&pf->pdev->dev, "add relay failed\n"); } else if (strncmp(cmd_buf, "del relay", 9) == 0) { int i; cnt = sscanf(&cmd_buf[9], "%i", &veb_seid); if (cnt != 1) { dev_info(&pf->pdev->dev, "del relay: bad command string, cnt=%d\n", cnt); goto command_write_done; } else if (veb_seid < 0) { dev_info(&pf->pdev->dev, "del relay %d: bad relay seid\n", veb_seid); goto command_write_done; } /* find the veb */ for (i = 0; i < I40E_MAX_VEB; i++) if (pf->veb[i] && pf->veb[i]->seid == veb_seid) break; if (i >= I40E_MAX_VEB) { dev_info(&pf->pdev->dev, "del relay: relay %d not found\n", veb_seid); goto command_write_done; } dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid); i40e_veb_release(pf->veb[i]); } else if (strncmp(cmd_buf, "add pvid", 8) == 0) { unsigned int v; int ret; u16 vid; cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v); if (cnt != 2) { dev_info(&pf->pdev->dev, "add pvid: bad command string, cnt=%d\n", cnt); goto command_write_done; } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n", vsi_seid); goto command_write_done; } vid = v; ret = i40e_vsi_add_pvid(vsi, vid); if (!ret) dev_info(&pf->pdev->dev, "add pvid: %d added to VSI %d\n", vid, vsi_seid); else dev_info(&pf->pdev->dev, "add pvid: %d to VSI %d failed, ret=%d\n", vid, vsi_seid, ret); } else if (strncmp(cmd_buf, "del pvid", 8) == 0) { cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid); if (cnt != 1) { dev_info(&pf->pdev->dev, "del pvid: bad command string, cnt=%d\n", cnt); goto command_write_done; } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "del pvid: VSI %d not found\n", vsi_seid); goto command_write_done; } i40e_vsi_remove_pvid(vsi); dev_info(&pf->pdev->dev, "del pvid: removed from VSI %d\n", vsi_seid); } else if 
(strncmp(cmd_buf, "dump", 4) == 0) { if (strncmp(&cmd_buf[5], "switch", 6) == 0) { i40e_fetch_switch_configuration(pf, true); } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) { cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid); if (cnt > 0) i40e_dbg_dump_vsi_seid(pf, vsi_seid); else i40e_dbg_dump_vsi_no_seid(pf); } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) { cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid); if (cnt > 0) i40e_dbg_dump_veb_seid(pf, vsi_seid); else i40e_dbg_dump_veb_all(pf); } else if (strncmp(&cmd_buf[5], "vf", 2) == 0) { cnt = sscanf(&cmd_buf[7], "%i", &vf_id); if (cnt > 0) i40e_dbg_dump_vf(pf, vf_id); else i40e_dbg_dump_vf_all(pf); } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) { int ring_id, desc_n; if (strncmp(&cmd_buf[10], "rx", 2) == 0) { cnt = sscanf(&cmd_buf[12], "%i %i %i", &vsi_seid, &ring_id, &desc_n); i40e_dbg_dump_desc(cnt, vsi_seid, ring_id, desc_n, pf, RING_TYPE_RX); } else if (strncmp(&cmd_buf[10], "tx", 2) == 0) { cnt = sscanf(&cmd_buf[12], "%i %i %i", &vsi_seid, &ring_id, &desc_n); i40e_dbg_dump_desc(cnt, vsi_seid, ring_id, desc_n, pf, RING_TYPE_TX); } else if (strncmp(&cmd_buf[10], "xdp", 3) == 0) { cnt = sscanf(&cmd_buf[13], "%i %i %i", &vsi_seid, &ring_id, &desc_n); i40e_dbg_dump_desc(cnt, vsi_seid, ring_id, desc_n, pf, RING_TYPE_XDP); } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) { i40e_dbg_dump_aq_desc(pf); } else { dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n"); dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n"); dev_info(&pf->pdev->dev, "dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n"); dev_info(&pf->pdev->dev, "dump desc aq\n"); } } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) { dev_info(&pf->pdev->dev, "core reset count: %d\n", pf->corer_count); dev_info(&pf->pdev->dev, "global reset count: %d\n", pf->globr_count); dev_info(&pf->pdev->dev, "emp reset count: %d\n", pf->empr_count); dev_info(&pf->pdev->dev, "pf reset count: %d\n", pf->pfr_count); dev_info(&pf->pdev->dev, "pf tx sluggish count: %d\n", pf->tx_sluggish_count); } else if (strncmp(&cmd_buf[5], "port", 4) == 0) { struct i40e_aqc_query_port_ets_config_resp *bw_data; struct i40e_dcbx_config *cfg = &pf->hw.local_dcbx_config; struct i40e_dcbx_config *r_cfg = &pf->hw.remote_dcbx_config; int i, ret; u16 switch_id; bw_data = kzalloc(sizeof( struct i40e_aqc_query_port_ets_config_resp), GFP_KERNEL); if (!bw_data) { ret = -ENOMEM; goto command_write_done; } vsi = pf->vsi[pf->lan_vsi]; switch_id = le16_to_cpu(vsi->info.switch_id) & I40E_AQ_VSI_SW_ID_MASK; ret = i40e_aq_query_port_ets_config(&pf->hw, switch_id, bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, "Query Port ETS Config AQ command failed =0x%x\n", pf->hw.aq.asq_last_status); kfree(bw_data); bw_data = NULL; goto command_write_done; } dev_info(&pf->pdev->dev, "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n", bw_data->tc_valid_bits, bw_data->tc_strict_priority_bits, le16_to_cpu(bw_data->tc_bw_max[0]), le16_to_cpu(bw_data->tc_bw_max[1])); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n", bw_data->tc_bw_share_credits[i], le16_to_cpu(bw_data->tc_bw_limits[i])); } kfree(bw_data); bw_data = NULL; dev_info(&pf->pdev->dev, "port dcbx_mode=%d\n", cfg->dcbx_mode); dev_info(&pf->pdev->dev, "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n", cfg->etscfg.willing, cfg->etscfg.cbs, cfg->etscfg.maxtcs); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d 
tctsa=%d\n", i, cfg->etscfg.prioritytable[i], cfg->etscfg.tcbwtable[i], cfg->etscfg.tsatable[i]); } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n", i, cfg->etsrec.prioritytable[i], cfg->etsrec.tcbwtable[i], cfg->etsrec.tsatable[i]); } dev_info(&pf->pdev->dev, "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n", cfg->pfc.willing, cfg->pfc.mbc, cfg->pfc.pfccap, cfg->pfc.pfcenable); dev_info(&pf->pdev->dev, "port app_table: num_apps=%d\n", cfg->numapps); for (i = 0; i < cfg->numapps; i++) { dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n", i, cfg->app[i].priority, cfg->app[i].selector, cfg->app[i].protocolid); } /* Peer TLV DCBX data */ dev_info(&pf->pdev->dev, "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n", r_cfg->etscfg.willing, r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n", i, r_cfg->etscfg.prioritytable[i], r_cfg->etscfg.tcbwtable[i], r_cfg->etscfg.tsatable[i]); } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n", i, r_cfg->etsrec.prioritytable[i], r_cfg->etsrec.tcbwtable[i], r_cfg->etsrec.tsatable[i]); } dev_info(&pf->pdev->dev, "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n", r_cfg->pfc.willing, r_cfg->pfc.mbc, r_cfg->pfc.pfccap, r_cfg->pfc.pfcenable); dev_info(&pf->pdev->dev, "remote port app_table: num_apps=%d\n", r_cfg->numapps); for (i = 0; i < r_cfg->numapps; i++) { dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n", i, r_cfg->app[i].priority, r_cfg->app[i].selector, r_cfg->app[i].protocolid); } } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) { int cluster_id, table_id; int index, ret; u16 buff_len = 4096; u32 next_index; u8 next_table; u8 *buff; u16 rlen; cnt = sscanf(&cmd_buf[18], "%i %i %i", &cluster_id, &table_id, &index); if (cnt != 3) { dev_info(&pf->pdev->dev, "dump debug fwdata <cluster_id> <table_id> <index>\n"); goto command_write_done; } dev_info(&pf->pdev->dev, "AQ debug dump fwdata params %x %x %x %x\n", cluster_id, table_id, index, buff_len); buff = kzalloc(buff_len, GFP_KERNEL); if (!buff) goto command_write_done; ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id, index, buff_len, buff, &rlen, &next_table, &next_index, NULL); if (ret) { dev_info(&pf->pdev->dev, "debug dump fwdata AQ Failed %d 0x%x\n", ret, pf->hw.aq.asq_last_status); kfree(buff); buff = NULL; goto command_write_done; } dev_info(&pf->pdev->dev, "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n", rlen, next_table, next_index); print_hex_dump(KERN_INFO, "AQ buffer WB: ", DUMP_PREFIX_OFFSET, 16, 1, buff, rlen, true); kfree(buff); buff = NULL; } else { dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>], dump desc xdp <vsi_seid> <ring_id> [<desc_n>],\n"); dev_info(&pf->pdev->dev, "dump switch\n"); dev_info(&pf->pdev->dev, "dump vsi [seid]\n"); dev_info(&pf->pdev->dev, "dump reset stats\n"); dev_info(&pf->pdev->dev, "dump port\n"); dev_info(&pf->pdev->dev, "dump vf [vf_id]\n"); dev_info(&pf->pdev->dev, "dump debug fwdata <cluster_id> <table_id> <index>\n"); } } else if (strncmp(cmd_buf, "pfr", 3) == 0) { dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n"); i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED)); } else if 
(strncmp(cmd_buf, "corer", 5) == 0) { dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n"); i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "globr", 5) == 0) { dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n"); i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "read", 4) == 0) { u32 address; u32 value; cnt = sscanf(&cmd_buf[4], "%i", &address); if (cnt != 1) { dev_info(&pf->pdev->dev, "read <reg>\n"); goto command_write_done; } /* check the range on address */ if (address > (pf->ioremap_len - sizeof(u32))) { dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n", address, (unsigned long int)(pf->ioremap_len - sizeof(u32))); goto command_write_done; } value = rd32(&pf->hw, address); dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n", address, value); } else if (strncmp(cmd_buf, "write", 5) == 0) { u32 address, value; cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value); if (cnt != 2) { dev_info(&pf->pdev->dev, "write <reg> <value>\n"); goto command_write_done; } /* check the range on address */ if (address > (pf->ioremap_len - sizeof(u32))) { dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n", address, (unsigned long int)(pf->ioremap_len - sizeof(u32))); goto command_write_done; } wr32(&pf->hw, address, value); value = rd32(&pf->hw, address); dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n", address, value); } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) { if (strncmp(&cmd_buf[12], "vsi", 3) == 0) { cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid); if (cnt == 0) { int i; for (i = 0; i < pf->num_alloc_vsi; i++) i40e_vsi_reset_stats(pf->vsi[i]); dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n"); } else if (cnt == 1) { vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "clear_stats vsi: bad vsi %d\n", vsi_seid); goto command_write_done; } i40e_vsi_reset_stats(vsi); dev_info(&pf->pdev->dev, "vsi clear stats called for vsi %d\n", vsi_seid); } else { dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n"); } } else if (strncmp(&cmd_buf[12], "port", 4) == 0) { if (pf->hw.partition_id == 1) { i40e_pf_reset_stats(pf); dev_info(&pf->pdev->dev, "port stats cleared\n"); } else { dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n"); } } else { dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n"); } } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) { struct i40e_aq_desc *desc; int ret; desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL); if (!desc) goto command_write_done; cnt = sscanf(&cmd_buf[11], "%hi %hi %hi %hi %i %i %i %i %i %i", &desc->flags, &desc->opcode, &desc->datalen, &desc->retval, &desc->cookie_high, &desc->cookie_low, &desc->params.internal.param0, &desc->params.internal.param1, &desc->params.internal.param2, &desc->params.internal.param3); if (cnt != 10) { dev_info(&pf->pdev->dev, "send aq_cmd: bad command string, cnt=%d\n", cnt); kfree(desc); desc = NULL; goto command_write_done; } ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL); if (!ret) { dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n"); } else if (ret == -EIO) { dev_info(&pf->pdev->dev, "AQ command send failed Opcode %x AQ Error: %d\n", desc->opcode, pf->hw.aq.asq_last_status); } else { dev_info(&pf->pdev->dev, "AQ command send failed Opcode %x Status: %d\n", desc->opcode, ret); } dev_info(&pf->pdev->dev, "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 
desc->flags, desc->opcode, desc->datalen, desc->retval, desc->cookie_high, desc->cookie_low, desc->params.internal.param0, desc->params.internal.param1, desc->params.internal.param2, desc->params.internal.param3); kfree(desc); desc = NULL; } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) { struct i40e_aq_desc *desc; u16 buffer_len; u8 *buff; int ret; desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL); if (!desc) goto command_write_done; cnt = sscanf(&cmd_buf[20], "%hi %hi %hi %hi %i %i %i %i %i %i %hi", &desc->flags, &desc->opcode, &desc->datalen, &desc->retval, &desc->cookie_high, &desc->cookie_low, &desc->params.internal.param0, &desc->params.internal.param1, &desc->params.internal.param2, &desc->params.internal.param3, &buffer_len); if (cnt != 11) { dev_info(&pf->pdev->dev, "send indirect aq_cmd: bad command string, cnt=%d\n", cnt); kfree(desc); desc = NULL; goto command_write_done; } /* Just stub a buffer big enough in case user messed up */ if (buffer_len == 0) buffer_len = 1280; buff = kzalloc(buffer_len, GFP_KERNEL); if (!buff) { kfree(desc); desc = NULL; goto command_write_done; } desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); ret = i40e_asq_send_command(&pf->hw, desc, buff, buffer_len, NULL); if (!ret) { dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n"); } else if (ret == -EIO) { dev_info(&pf->pdev->dev, "AQ command send failed Opcode %x AQ Error: %d\n", desc->opcode, pf->hw.aq.asq_last_status); } else { dev_info(&pf->pdev->dev, "AQ command send failed Opcode %x Status: %d\n", desc->opcode, ret); } dev_info(&pf->pdev->dev, "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", desc->flags, desc->opcode, desc->datalen, desc->retval, desc->cookie_high, desc->cookie_low, desc->params.internal.param0, desc->params.internal.param1, desc->params.internal.param2, desc->params.internal.param3); print_hex_dump(KERN_INFO, "AQ buffer WB: ", DUMP_PREFIX_OFFSET, 16, 1, buff, buffer_len, true); kfree(buff); buff = NULL; kfree(desc); desc = NULL; } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) { dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n", i40e_get_current_fd_count(pf)); } else if (strncmp(cmd_buf, "lldp", 4) == 0) { if (strncmp(&cmd_buf[5], "stop", 4) == 0) { int ret; ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL); if (ret) { dev_info(&pf->pdev->dev, "Stop LLDP AQ command failed =0x%x\n", pf->hw.aq.asq_last_status); goto command_write_done; } ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, pf->hw.mac.addr, ETH_P_LLDP, 0, pf->vsi[pf->lan_vsi]->seid, 0, true, NULL, NULL); if (ret) { dev_info(&pf->pdev->dev, "%s: Add Control Packet Filter AQ command failed =0x%x\n", __func__, pf->hw.aq.asq_last_status); goto command_write_done; } #ifdef CONFIG_I40E_DCB pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; #endif /* CONFIG_I40E_DCB */ } else if (strncmp(&cmd_buf[5], "start", 5) == 0) { int ret; ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, pf->hw.mac.addr, ETH_P_LLDP, 0, pf->vsi[pf->lan_vsi]->seid, 0, false, NULL, NULL); if (ret) { dev_info(&pf->pdev->dev, "%s: Remove Control Packet Filter AQ command failed =0x%x\n", __func__, pf->hw.aq.asq_last_status); /* Continue and start FW LLDP anyways */ } ret = i40e_aq_start_lldp(&pf->hw, false, NULL); if (ret) { dev_info(&pf->pdev->dev, "Start LLDP AQ command failed =0x%x\n", pf->hw.aq.asq_last_status); goto command_write_done; } #ifdef CONFIG_I40E_DCB pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; #endif 
/* CONFIG_I40E_DCB */ } else if (strncmp(&cmd_buf[5], "get local", 9) == 0) { u16 llen, rlen; int ret; u8 *buff; buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); if (!buff) goto command_write_done; ret = i40e_aq_get_lldp_mib(&pf->hw, 0, I40E_AQ_LLDP_MIB_LOCAL, buff, I40E_LLDPDU_SIZE, &llen, &rlen, NULL); if (ret) { dev_info(&pf->pdev->dev, "Get LLDP MIB (local) AQ command failed =0x%x\n", pf->hw.aq.asq_last_status); kfree(buff); buff = NULL; goto command_write_done; } dev_info(&pf->pdev->dev, "LLDP MIB (local)\n"); print_hex_dump(KERN_INFO, "LLDP MIB (local): ", DUMP_PREFIX_OFFSET, 16, 1, buff, I40E_LLDPDU_SIZE, true); kfree(buff); buff = NULL; } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) { u16 llen, rlen; int ret; u8 *buff; buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); if (!buff) goto command_write_done; ret = i40e_aq_get_lldp_mib(&pf->hw, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, I40E_AQ_LLDP_MIB_REMOTE, buff, I40E_LLDPDU_SIZE, &llen, &rlen, NULL); if (ret) { dev_info(&pf->pdev->dev, "Get LLDP MIB (remote) AQ command failed =0x%x\n", pf->hw.aq.asq_last_status); kfree(buff); buff = NULL; goto command_write_done; } dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n"); print_hex_dump(KERN_INFO, "LLDP MIB (remote): ", DUMP_PREFIX_OFFSET, 16, 1, buff, I40E_LLDPDU_SIZE, true); kfree(buff); buff = NULL; } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) { int ret; ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw, true, NULL); if (ret) { dev_info(&pf->pdev->dev, "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n", pf->hw.aq.asq_last_status); goto command_write_done; } } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) { int ret; ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw, false, NULL); if (ret) { dev_info(&pf->pdev->dev, "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n", pf->hw.aq.asq_last_status); goto command_write_done; } } } else if (strncmp(cmd_buf, "nvm read", 8) == 0) { u16 buffer_len, bytes; u16 module; u32 offset; u16 *buff; int ret; cnt = sscanf(&cmd_buf[8], "%hx %x %hx", &module, &offset, &buffer_len); if (cnt == 0) { module = 0; offset = 0; buffer_len = 0; } else if (cnt == 1) { offset = 0; buffer_len = 0; } else if (cnt == 2) { buffer_len = 0; } else if (cnt > 3) { dev_info(&pf->pdev->dev, "nvm read: bad command string, cnt=%d\n", cnt); goto command_write_done; } /* set the max length */ buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2); bytes = 2 * buffer_len; /* read at least 1k bytes, no more than 4kB */ bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE); buff = kzalloc(bytes, GFP_KERNEL); if (!buff) goto command_write_done; ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); if (ret) { dev_info(&pf->pdev->dev, "Failed Acquiring NVM resource for read err=%d status=0x%x\n", ret, pf->hw.aq.asq_last_status); kfree(buff); goto command_write_done; } ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset), bytes, (u8 *)buff, true, NULL); i40e_release_nvm(&pf->hw); if (ret) { dev_info(&pf->pdev->dev, "Read NVM AQ failed err=%d status=0x%x\n", ret, pf->hw.aq.asq_last_status); } else { dev_info(&pf->pdev->dev, "Read NVM module=0x%x offset=0x%x words=%d\n", module, offset, buffer_len); if (bytes) print_hex_dump(KERN_INFO, "NVM Dump: ", DUMP_PREFIX_OFFSET, 16, 2, buff, bytes, true); } kfree(buff); buff = NULL; } else { dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf); dev_info(&pf->pdev->dev, "available commands\n"); dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n"); dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n"); 
dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n"); dev_info(&pf->pdev->dev, " del relay <relay_seid>\n"); dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n"); dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n"); dev_info(&pf->pdev->dev, " dump switch\n"); dev_info(&pf->pdev->dev, " dump vsi [seid]\n"); dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n"); dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n"); dev_info(&pf->pdev->dev, " dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n"); dev_info(&pf->pdev->dev, " dump desc aq\n"); dev_info(&pf->pdev->dev, " dump reset stats\n"); dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n"); dev_info(&pf->pdev->dev, " read <reg>\n"); dev_info(&pf->pdev->dev, " write <reg> <value>\n"); dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n"); dev_info(&pf->pdev->dev, " clear_stats port\n"); dev_info(&pf->pdev->dev, " pfr\n"); dev_info(&pf->pdev->dev, " corer\n"); dev_info(&pf->pdev->dev, " globr\n"); dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n"); dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n"); dev_info(&pf->pdev->dev, " fd current cnt"); dev_info(&pf->pdev->dev, " lldp start\n"); dev_info(&pf->pdev->dev, " lldp stop\n"); dev_info(&pf->pdev->dev, " lldp get local\n"); dev_info(&pf->pdev->dev, " lldp get remote\n"); dev_info(&pf->pdev->dev, " lldp event on\n"); dev_info(&pf->pdev->dev, " lldp event off\n"); dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n"); } command_write_done: kfree(cmd_buf); cmd_buf = NULL; return count; } static const struct file_operations i40e_dbg_command_fops = { .owner = THIS_MODULE, .open = simple_open, .read = i40e_dbg_command_read, .write = i40e_dbg_command_write, }; /************************************************************** * netdev_ops * The netdev_ops entry in debugfs is for giving the driver commands * to be executed from the netdev operations. 
**************************************************************/ static char i40e_dbg_netdev_ops_buf[256] = ""; /** * i40e_dbg_netdev_ops_read - read for netdev_ops datum * @filp: the opened file * @buffer: where to write the data for the user to read * @count: the size of the user's buffer * @ppos: file position offset **/ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { struct i40e_pf *pf = filp->private_data; int bytes_not_copied; int buf_size = 256; char *buf; int len; /* don't allow partal reads */ if (*ppos != 0) return 0; if (count < buf_size) return -ENOSPC; buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOSPC; len = snprintf(buf, buf_size, "%s: %s\n", pf->vsi[pf->lan_vsi]->netdev->name, i40e_dbg_netdev_ops_buf); bytes_not_copied = copy_to_user(buffer, buf, len); kfree(buf); if (bytes_not_copied) return -EFAULT; *ppos = len; return len; } /** * i40e_dbg_netdev_ops_write - write into netdev_ops datum * @filp: the opened file * @buffer: where to find the user's data * @count: the length of the user's data * @ppos: file position offset **/ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { struct i40e_pf *pf = filp->private_data; int bytes_not_copied; struct i40e_vsi *vsi; char *buf_tmp; int vsi_seid; int i, cnt; /* don't allow partial writes */ if (*ppos != 0) return 0; if (count >= sizeof(i40e_dbg_netdev_ops_buf)) return -ENOSPC; memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf)); bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf, buffer, count); if (bytes_not_copied) return -EFAULT; i40e_dbg_netdev_ops_buf[count] = '\0'; buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n'); if (buf_tmp) { *buf_tmp = '\0'; count = buf_tmp - i40e_dbg_netdev_ops_buf + 1; } if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) { int mtu; cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i", &vsi_seid, &mtu); if (cnt != 2) { dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n"); goto netdev_ops_write_done; } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "change_mtu: VSI %d not found\n", vsi_seid); } else if (!vsi->netdev) { dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n", vsi_seid); } else if (rtnl_trylock()) { vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev, mtu); rtnl_unlock(); dev_info(&pf->pdev->dev, "change_mtu called\n"); } else { dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n"); } } else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) { cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid); if (cnt != 1) { dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n"); goto netdev_ops_write_done; } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "set_rx_mode: VSI %d not found\n", vsi_seid); } else if (!vsi->netdev) { dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n", vsi_seid); } else if (rtnl_trylock()) { vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev); rtnl_unlock(); dev_info(&pf->pdev->dev, "set_rx_mode called\n"); } else { dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n"); } } else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) { cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid); if (cnt != 1) { dev_info(&pf->pdev->dev, "napi <vsi_seid>\n"); goto netdev_ops_write_done; } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "napi: VSI %d not 
found\n", vsi_seid); } else if (!vsi->netdev) { dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n", vsi_seid); } else { for (i = 0; i < vsi->num_q_vectors; i++) napi_schedule(&vsi->q_vectors[i]->napi); dev_info(&pf->pdev->dev, "napi called\n"); } } else { dev_info(&pf->pdev->dev, "unknown command '%s'\n", i40e_dbg_netdev_ops_buf); dev_info(&pf->pdev->dev, "available commands\n"); dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n"); dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n"); dev_info(&pf->pdev->dev, " napi <vsi_seid>\n"); } netdev_ops_write_done: return count; } static const struct file_operations i40e_dbg_netdev_ops_fops = { .owner = THIS_MODULE, .open = simple_open, .read = i40e_dbg_netdev_ops_read, .write = i40e_dbg_netdev_ops_write, }; /** * i40e_dbg_pf_init - setup the debugfs directory for the PF * @pf: the PF that is starting up **/ void i40e_dbg_pf_init(struct i40e_pf *pf) { const char *name = pci_name(pf->pdev); pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root); debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf, &i40e_dbg_command_fops); debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, &i40e_dbg_netdev_ops_fops); } /** * i40e_dbg_pf_exit - clear out the PF's debugfs entries * @pf: the PF that is stopping **/ void i40e_dbg_pf_exit(struct i40e_pf *pf) { debugfs_remove_recursive(pf->i40e_dbg_pf); pf->i40e_dbg_pf = NULL; } /** * i40e_dbg_init - start up debugfs for the driver **/ void i40e_dbg_init(void) { i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL); if (IS_ERR(i40e_dbg_root)) pr_info("init of debugfs failed\n"); } /** * i40e_dbg_exit - clean out the driver's debugfs entries **/ void i40e_dbg_exit(void) { debugfs_remove_recursive(i40e_dbg_root); i40e_dbg_root = NULL; } #endif /* CONFIG_DEBUG_FS */
linux-master
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
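/*
 * Example (not part of the driver): a minimal user-space sketch that drives
 * the per-PF debugfs "command" file created by i40e_dbg_pf_init() above.
 * The debugfs mount point and the PCI address below are assumptions made for
 * this illustration; the actual path is
 * <debugfs mount>/i40e/<pci_name(pf->pdev)>/command.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed location: adjust the PCI address for the target adapter. */
	const char *path = "/sys/kernel/debug/i40e/0000:3b:00.0/command";
	/* "nvm read [module] [word_offset] [word_count]": dump 16 words. */
	const char *cmd = "nvm read 0x0 0x0 0x10\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);

	/* The handler reports its results via dev_info()/print_hex_dump(),
	 * so the hex dump appears in the kernel log (dmesg), not on this fd.
	 */
	return 0;
}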
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e_prototype.h" /** * i40e_init_nvm - Initialize NVM function pointers * @hw: pointer to the HW structure * * Setup the function pointers and the NVM info structure. Should be called * once per NVM initialization, e.g. inside the i40e_init_shared_code(). * Please notice that the NVM term is used here (& in all methods covered * in this file) as an equivalent of the FLASH part mapped into the SR. * We are accessing FLASH always thru the Shadow RAM. **/ int i40e_init_nvm(struct i40e_hw *hw) { struct i40e_nvm_info *nvm = &hw->nvm; int ret_code = 0; u32 fla, gens; u8 sr_size; /* The SR size is stored regardless of the nvm programming mode * as the blank mode may be used in the factory line. */ gens = rd32(hw, I40E_GLNVM_GENS); sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> I40E_GLNVM_GENS_SR_SIZE_SHIFT); /* Switching to words (sr_size contains power of 2KB) */ nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB; /* Check if we are in the normal or blank NVM programming mode */ fla = rd32(hw, I40E_GLNVM_FLA); if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */ /* Max NVM timeout */ nvm->timeout = I40E_MAX_NVM_TIMEOUT; nvm->blank_nvm_mode = false; } else { /* Blank programming mode */ nvm->blank_nvm_mode = true; ret_code = -EIO; i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n"); } return ret_code; } /** * i40e_acquire_nvm - Generic request for acquiring the NVM ownership * @hw: pointer to the HW structure * @access: NVM access type (read or write) * * This function will request NVM ownership for reading * via the proper Admin Command. **/ int i40e_acquire_nvm(struct i40e_hw *hw, enum i40e_aq_resource_access_type access) { u64 gtime, timeout; u64 time_left = 0; int ret_code = 0; if (hw->nvm.blank_nvm_mode) goto i40e_i40e_acquire_nvm_exit; ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, 0, &time_left, NULL); /* Reading the Global Device Timer */ gtime = rd32(hw, I40E_GLVFGEN_TIMER); /* Store the timeout */ hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime; if (ret_code) i40e_debug(hw, I40E_DEBUG_NVM, "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n", access, time_left, ret_code, hw->aq.asq_last_status); if (ret_code && time_left) { /* Poll until the current NVM owner timeouts */ timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime; while ((gtime < timeout) && time_left) { usleep_range(10000, 20000); gtime = rd32(hw, I40E_GLVFGEN_TIMER); ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, 0, &time_left, NULL); if (!ret_code) { hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime; break; } } if (ret_code) { hw->nvm.hw_semaphore_timeout = 0; i40e_debug(hw, I40E_DEBUG_NVM, "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n", time_left, ret_code, hw->aq.asq_last_status); } } i40e_i40e_acquire_nvm_exit: return ret_code; } /** * i40e_release_nvm - Generic request for releasing the NVM ownership * @hw: pointer to the HW structure * * This function will release NVM resource via the proper Admin Command. 
**/ void i40e_release_nvm(struct i40e_hw *hw) { u32 total_delay = 0; int ret_code = 0; if (hw->nvm.blank_nvm_mode) return; ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); /* there are some rare cases when trying to release the resource * results in an admin Q timeout, so handle them correctly */ while ((ret_code == -EIO) && (total_delay < hw->aq.asq_cmd_timeout)) { usleep_range(1000, 2000); ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); total_delay++; } } /** * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit * @hw: pointer to the HW structure * * Polls the SRCTL Shadow RAM register done bit. **/ static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) { int ret_code = -EIO; u32 srctl, wait_cnt; /* Poll the I40E_GLNVM_SRCTL until the done bit is set */ for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) { srctl = rd32(hw, I40E_GLNVM_SRCTL); if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) { ret_code = 0; break; } udelay(5); } if (ret_code == -EIO) i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set"); return ret_code; } /** * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. **/ static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, u16 *data) { int ret_code = -EIO; u32 sr_reg; if (offset >= hw->nvm.sr_size) { i40e_debug(hw, I40E_DEBUG_NVM, "NVM read error: offset %d beyond Shadow RAM limit %d\n", offset, hw->nvm.sr_size); ret_code = -EINVAL; goto read_nvm_exit; } /* Poll the done bit first */ ret_code = i40e_poll_sr_srctl_done_bit(hw); if (!ret_code) { /* Write the address and start reading */ sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | BIT(I40E_GLNVM_SRCTL_START_SHIFT); wr32(hw, I40E_GLNVM_SRCTL, sr_reg); /* Poll I40E_GLNVM_SRCTL until the done bit is set */ ret_code = i40e_poll_sr_srctl_done_bit(hw); if (!ret_code) { sr_reg = rd32(hw, I40E_GLNVM_SRDATA); *data = (u16)((sr_reg & I40E_GLNVM_SRDATA_RDDATA_MASK) >> I40E_GLNVM_SRDATA_RDDATA_SHIFT); } } if (ret_code) i40e_debug(hw, I40E_DEBUG_NVM, "NVM read error: Couldn't access Shadow RAM address: 0x%x\n", offset); read_nvm_exit: return ret_code; } /** * i40e_read_nvm_aq - Read Shadow RAM. * @hw: pointer to the HW structure. * @module_pointer: module pointer location in words from the NVM beginning * @offset: offset in words from module start * @words: number of words to read * @data: buffer with words to read to the Shadow RAM * @last_command: tells the AdminQ that this is the last command * * Reads a 16 bit words buffer to the Shadow RAM using the admin command. **/ static int i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 words, void *data, bool last_command) { struct i40e_asq_cmd_details cmd_details; int ret_code = -EIO; memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; /* Here we are checking the SR limit only for the flat memory model. * We cannot do it for the module-based model, as we did not acquire * the NVM resource yet (we cannot get the module pointer value). * Firmware will check the module-based model. 
*/ if ((offset + words) > hw->nvm.sr_size) i40e_debug(hw, I40E_DEBUG_NVM, "NVM read error: offset %d beyond Shadow RAM limit %d\n", (offset + words), hw->nvm.sr_size); else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) /* We can read only up to 4KB (one sector), in one AQ write */ i40e_debug(hw, I40E_DEBUG_NVM, "NVM read fail error: tried to read %d words, limit is %d.\n", words, I40E_SR_SECTOR_SIZE_IN_WORDS); else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) /* A single read cannot spread over two sectors */ i40e_debug(hw, I40E_DEBUG_NVM, "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n", offset, words); else ret_code = i40e_aq_read_nvm(hw, module_pointer, 2 * offset, /*bytes*/ 2 * words, /*bytes*/ data, last_command, &cmd_details); return ret_code; } /** * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM using the AdminQ **/ static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, u16 *data) { int ret_code = -EIO; ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true); *data = le16_to_cpu(*(__le16 *)data); return ret_code; } /** * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM. * * Do not use this function except in cases where the nvm lock is already * taken via i40e_acquire_nvm(). **/ static int __i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) { if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) return i40e_read_nvm_word_aq(hw, offset, data); return i40e_read_nvm_word_srctl(hw, offset, data); } /** * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM. 
**/ int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) { int ret_code = 0; if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret_code) return ret_code; ret_code = __i40e_read_nvm_word(hw, offset, data); if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) i40e_release_nvm(hw); return ret_code; } /** * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location * @hw: Pointer to the HW structure * @module_ptr: Pointer to module in words with respect to NVM beginning * @module_offset: Offset in words from module start * @data_offset: Offset in words from reading data area start * @words_data_size: Words to read from NVM * @data_ptr: Pointer to memory location where resulting buffer will be stored **/ int i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset, u16 data_offset, u16 words_data_size, u16 *data_ptr) { u16 specific_ptr = 0; u16 ptr_value = 0; u32 offset = 0; int status; if (module_ptr != 0) { status = i40e_read_nvm_word(hw, module_ptr, &ptr_value); if (status) { i40e_debug(hw, I40E_DEBUG_ALL, "Reading nvm word failed.Error code: %d.\n", status); return -EIO; } } #define I40E_NVM_INVALID_PTR_VAL 0x7FFF #define I40E_NVM_INVALID_VAL 0xFFFF /* Pointer not initialized */ if (ptr_value == I40E_NVM_INVALID_PTR_VAL || ptr_value == I40E_NVM_INVALID_VAL) { i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n"); return -EINVAL; } /* Check whether the module is in SR mapped area or outside */ if (ptr_value & I40E_PTR_TYPE) { /* Pointer points outside of the Shared RAM mapped area */ i40e_debug(hw, I40E_DEBUG_ALL, "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n"); return -EINVAL; } else { /* Read from the Shadow RAM */ status = i40e_read_nvm_word(hw, ptr_value + module_offset, &specific_ptr); if (status) { i40e_debug(hw, I40E_DEBUG_ALL, "Reading nvm word failed.Error code: %d.\n", status); return -EIO; } offset = ptr_value + module_offset + specific_ptr + data_offset; status = i40e_read_nvm_buffer(hw, offset, &words_data_size, data_ptr); if (status) { i40e_debug(hw, I40E_DEBUG_ALL, "Reading nvm buffer failed.Error code: %d.\n", status); } } return status; } /** * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). * @words: (in) number of words to read; (out) number of words actually read * @data: words read from the Shadow RAM * * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() * method. The buffer read is preceded by the NVM ownership take * and followed by the release. **/ static int i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { int ret_code = 0; u16 index, word; /* Loop thru the selected region */ for (word = 0; word < *words; word++) { index = offset + word; ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]); if (ret_code) break; } /* Update the number of words read from the Shadow RAM */ *words = word; return ret_code; } /** * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). * @words: (in) number of words to read; (out) number of words actually read * @data: words read from the Shadow RAM * * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq() * method. 
The buffer read is preceded by the NVM ownership take * and followed by the release. **/ static int i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { bool last_cmd = false; u16 words_read = 0; u16 read_size; int ret_code; u16 i = 0; do { /* Calculate number of bytes we should read in this step. * FVL AQ do not allow to read more than one page at a time or * to cross page boundaries. */ if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS) read_size = min(*words, (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS - (offset % I40E_SR_SECTOR_SIZE_IN_WORDS))); else read_size = min((*words - words_read), I40E_SR_SECTOR_SIZE_IN_WORDS); /* Check if this is last command, if so set proper flag */ if ((words_read + read_size) >= *words) last_cmd = true; ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size, data + words_read, last_cmd); if (ret_code) goto read_nvm_buffer_aq_exit; /* Increment counter for words already read and move offset to * new read location */ words_read += read_size; offset += read_size; } while (words_read < *words); for (i = 0; i < *words; i++) data[i] = le16_to_cpu(((__le16 *)data)[i]); read_nvm_buffer_aq_exit: *words = words_read; return ret_code; } /** * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). * @words: (in) number of words to read; (out) number of words actually read * @data: words read from the Shadow RAM * * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() * method. **/ static int __i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) return i40e_read_nvm_buffer_aq(hw, offset, words, data); return i40e_read_nvm_buffer_srctl(hw, offset, words, data); } /** * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). * @words: (in) number of words to read; (out) number of words actually read * @data: words read from the Shadow RAM * * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() * method. The buffer read is preceded by the NVM ownership take * and followed by the release. **/ int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { int ret_code = 0; if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (!ret_code) { ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data); i40e_release_nvm(hw); } } else { ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); } return ret_code; } /** * i40e_write_nvm_aq - Writes Shadow RAM. * @hw: pointer to the HW structure. * @module_pointer: module pointer location in words from the NVM beginning * @offset: offset in words from module start * @words: number of words to write * @data: buffer with words to write to the Shadow RAM * @last_command: tells the AdminQ that this is the last command * * Writes a 16 bit words buffer to the Shadow RAM using the admin command. **/ static int i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 words, void *data, bool last_command) { struct i40e_asq_cmd_details cmd_details; int ret_code = -EIO; memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; /* Here we are checking the SR limit only for the flat memory model. 
* We cannot do it for the module-based model, as we did not acquire * the NVM resource yet (we cannot get the module pointer value). * Firmware will check the module-based model. */ if ((offset + words) > hw->nvm.sr_size) i40e_debug(hw, I40E_DEBUG_NVM, "NVM write error: offset %d beyond Shadow RAM limit %d\n", (offset + words), hw->nvm.sr_size); else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) /* We can write only up to 4KB (one sector), in one AQ write */ i40e_debug(hw, I40E_DEBUG_NVM, "NVM write fail error: tried to write %d words, limit is %d.\n", words, I40E_SR_SECTOR_SIZE_IN_WORDS); else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) /* A single write cannot spread over two sectors */ i40e_debug(hw, I40E_DEBUG_NVM, "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", offset, words); else ret_code = i40e_aq_update_nvm(hw, module_pointer, 2 * offset, /*bytes*/ 2 * words, /*bytes*/ data, last_command, 0, &cmd_details); return ret_code; } /** * i40e_calc_nvm_checksum - Calculates and returns the checksum * @hw: pointer to hardware structure * @checksum: pointer to the checksum * * This function calculates SW Checksum that covers the whole 64kB shadow RAM * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD * is customer specific and unknown. Therefore, this function skips all maximum * possible size of VPD (1kB). **/ static int i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) { struct i40e_virt_mem vmem; u16 pcie_alt_module = 0; u16 checksum_local = 0; u16 vpd_module = 0; int ret_code; u16 *data; u16 i = 0; ret_code = i40e_allocate_virt_mem(hw, &vmem, I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16)); if (ret_code) goto i40e_calc_nvm_checksum_exit; data = (u16 *)vmem.va; /* read pointer to VPD area */ ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); if (ret_code) { ret_code = -EIO; goto i40e_calc_nvm_checksum_exit; } /* read pointer to PCIe Alt Auto-load module */ ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, &pcie_alt_module); if (ret_code) { ret_code = -EIO; goto i40e_calc_nvm_checksum_exit; } /* Calculate SW checksum that covers the whole 64kB shadow RAM * except the VPD and PCIe ALT Auto-load modules */ for (i = 0; i < hw->nvm.sr_size; i++) { /* Read SR page */ if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) { u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS; ret_code = __i40e_read_nvm_buffer(hw, i, &words, data); if (ret_code) { ret_code = -EIO; goto i40e_calc_nvm_checksum_exit; } } /* Skip Checksum word */ if (i == I40E_SR_SW_CHECKSUM_WORD) continue; /* Skip VPD module (convert byte size to word count) */ if ((i >= (u32)vpd_module) && (i < ((u32)vpd_module + (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) { continue; } /* Skip PCIe ALT module (convert byte size to word count) */ if ((i >= (u32)pcie_alt_module) && (i < ((u32)pcie_alt_module + (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) { continue; } checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS]; } *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local; i40e_calc_nvm_checksum_exit: i40e_free_virt_mem(hw, &vmem); return ret_code; } /** * i40e_update_nvm_checksum - Updates the NVM checksum * @hw: pointer to hardware structure * * NVM ownership must be acquired before calling this function and released * on ARQ completion event reception by caller. * This function will commit SR to NVM. 
**/ int i40e_update_nvm_checksum(struct i40e_hw *hw) { __le16 le_sum; int ret_code; u16 checksum; ret_code = i40e_calc_nvm_checksum(hw, &checksum); if (!ret_code) { le_sum = cpu_to_le16(checksum); ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD, 1, &le_sum, true); } return ret_code; } /** * i40e_validate_nvm_checksum - Validate EEPROM checksum * @hw: pointer to hardware structure * @checksum: calculated checksum * * Performs checksum calculation and validates the NVM SW checksum. If the * caller does not need checksum, the value can be NULL. **/ int i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum) { u16 checksum_local = 0; u16 checksum_sr = 0; int ret_code = 0; /* We must acquire the NVM lock in order to correctly synchronize the * NVM accesses across multiple PFs. Without doing so it is possible * for one of the PFs to read invalid data potentially indicating that * the checksum is invalid. */ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret_code) return ret_code; ret_code = i40e_calc_nvm_checksum(hw, &checksum_local); __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr); i40e_release_nvm(hw); if (ret_code) return ret_code; /* Verify read checksum from EEPROM is the same as * calculated checksum */ if (checksum_local != checksum_sr) ret_code = -EIO; /* If the user cares, return the calculated checksum */ if (checksum) *checksum = checksum_local; return ret_code; } static int i40e_nvmupd_state_init(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); static int i40e_nvmupd_state_reading(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); static int i40e_nvmupd_state_writing(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *errno); static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, int *perrno); static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw, struct i40e_nvm_access *cmd, int *perrno); static int i40e_nvmupd_nvm_write(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); static int i40e_nvmupd_nvm_read(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); static int i40e_nvmupd_exec_aq(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); static inline u8 i40e_nvmupd_get_module(u32 val) { return (u8)(val & I40E_NVM_MOD_PNT_MASK); } static inline u8 i40e_nvmupd_get_transaction(u32 val) { return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); } static inline u8 i40e_nvmupd_get_preservation_flags(u32 val) { return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >> I40E_NVM_PRESERVATION_FLAGS_SHIFT); } static const char * const i40e_nvm_update_state_str[] = { "I40E_NVMUPD_INVALID", "I40E_NVMUPD_READ_CON", "I40E_NVMUPD_READ_SNT", "I40E_NVMUPD_READ_LCB", "I40E_NVMUPD_READ_SA", "I40E_NVMUPD_WRITE_ERA", "I40E_NVMUPD_WRITE_CON", "I40E_NVMUPD_WRITE_SNT", "I40E_NVMUPD_WRITE_LCB", "I40E_NVMUPD_WRITE_SA", "I40E_NVMUPD_CSUM_CON", "I40E_NVMUPD_CSUM_SA", "I40E_NVMUPD_CSUM_LCB", "I40E_NVMUPD_STATUS", "I40E_NVMUPD_EXEC_AQ", "I40E_NVMUPD_GET_AQ_RESULT", "I40E_NVMUPD_GET_AQ_EVENT", }; /** * i40e_nvmupd_command - Process an NVM update command * @hw: pointer to hardware structure * @cmd: pointer to nvm update command * @bytes: pointer to the data buffer * 
@perrno: pointer to return error code * * Dispatches command depending on what update state is current **/ int i40e_nvmupd_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { enum i40e_nvmupd_cmd upd_cmd; int status; /* assume success */ *perrno = 0; /* early check for status command and debug msgs */ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", i40e_nvm_update_state_str[upd_cmd], hw->nvmupd_state, hw->nvm_release_on_done, hw->nvm_wait_opcode, cmd->command, cmd->config, cmd->offset, cmd->data_size); if (upd_cmd == I40E_NVMUPD_INVALID) { *perrno = -EFAULT; i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_validate_command returns %d errno %d\n", upd_cmd, *perrno); } /* a status request returns immediately rather than * going into the state machine */ if (upd_cmd == I40E_NVMUPD_STATUS) { if (!cmd->data_size) { *perrno = -EFAULT; return -EINVAL; } bytes[0] = hw->nvmupd_state; if (cmd->data_size >= 4) { bytes[1] = 0; *((u16 *)&bytes[2]) = hw->nvm_wait_opcode; } /* Clear error status on read */ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; return 0; } /* Clear status even it is not read and log */ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) { i40e_debug(hw, I40E_DEBUG_NVM, "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n"); hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } /* Acquire lock to prevent race condition where adminq_task * can execute after i40e_nvmupd_nvm_read/write but before state * variables (nvm_wait_opcode, nvm_release_on_done) are updated. * * During NVMUpdate, it is observed that lock could be held for * ~5ms for most commands. However lock is held for ~60ms for * NVMUPD_CSUM_LCB command. */ mutex_lock(&hw->aq.arq_mutex); switch (hw->nvmupd_state) { case I40E_NVMUPD_STATE_INIT: status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_STATE_READING: status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_STATE_WRITING: status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_STATE_INIT_WAIT: case I40E_NVMUPD_STATE_WRITE_WAIT: /* if we need to stop waiting for an event, clear * the wait info and return before doing anything else */ if (cmd->offset == 0xffff) { i40e_nvmupd_clear_wait_state(hw); status = 0; break; } status = -EBUSY; *perrno = -EBUSY; break; default: /* invalid state, should never happen */ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: no such state %d\n", hw->nvmupd_state); status = -EOPNOTSUPP; *perrno = -ESRCH; break; } mutex_unlock(&hw->aq.arq_mutex); return status; } /** * i40e_nvmupd_state_init - Handle NVM update state Init * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * Process legitimate commands of the Init state and conditionally set next * state. Reject all other commands. 
**/ static int i40e_nvmupd_state_init(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { enum i40e_nvmupd_cmd upd_cmd; int status = 0; upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); switch (upd_cmd) { case I40E_NVMUPD_READ_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); i40e_release_nvm(hw); } break; case I40E_NVMUPD_READ_SNT: status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); if (status) i40e_release_nvm(hw); else hw->nvmupd_state = I40E_NVMUPD_STATE_READING; } break; case I40E_NVMUPD_WRITE_ERA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_erase(hw, cmd, perrno); if (status) { i40e_release_nvm(hw); } else { hw->nvm_release_on_done = true; hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } break; case I40E_NVMUPD_WRITE_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); if (status) { i40e_release_nvm(hw); } else { hw->nvm_release_on_done = true; hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } break; case I40E_NVMUPD_WRITE_SNT: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); if (status) { i40e_release_nvm(hw); } else { hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; } } break; case I40E_NVMUPD_CSUM_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } else { status = i40e_update_nvm_checksum(hw); if (status) { *perrno = hw->aq.asq_last_status ? i40e_aq_rc_to_posix(status, hw->aq.asq_last_status) : -EIO; i40e_release_nvm(hw); } else { hw->nvm_release_on_done = true; hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } break; case I40E_NVMUPD_EXEC_AQ: status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_GET_AQ_RESULT: status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_GET_AQ_EVENT: status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno); break; default: i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: bad cmd %s in init state\n", i40e_nvm_update_state_str[upd_cmd]); status = -EIO; *perrno = -ESRCH; break; } return status; } /** * i40e_nvmupd_state_reading - Handle NVM update state Reading * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * NVM ownership is already held. Process legitimate commands and set any * change in state; reject all other commands. 
**/ static int i40e_nvmupd_state_reading(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { enum i40e_nvmupd_cmd upd_cmd; int status = 0; upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); switch (upd_cmd) { case I40E_NVMUPD_READ_SA: case I40E_NVMUPD_READ_CON: status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); break; case I40E_NVMUPD_READ_LCB: status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); i40e_release_nvm(hw); hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; break; default: i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: bad cmd %s in reading state.\n", i40e_nvm_update_state_str[upd_cmd]); status = -EOPNOTSUPP; *perrno = -ESRCH; break; } return status; } /** * i40e_nvmupd_state_writing - Handle NVM update state Writing * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * NVM ownership is already held. Process legitimate commands and set any * change in state; reject all other commands **/ static int i40e_nvmupd_state_writing(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { enum i40e_nvmupd_cmd upd_cmd; bool retry_attempt = false; int status = 0; upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); retry: switch (upd_cmd) { case I40E_NVMUPD_WRITE_CON: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); if (!status) { hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; } break; case I40E_NVMUPD_WRITE_LCB: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); if (status) { *perrno = hw->aq.asq_last_status ? i40e_aq_rc_to_posix(status, hw->aq.asq_last_status) : -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { hw->nvm_release_on_done = true; hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } break; case I40E_NVMUPD_CSUM_CON: /* Assumes the caller has acquired the nvm */ status = i40e_update_nvm_checksum(hw); if (status) { *perrno = hw->aq.asq_last_status ? i40e_aq_rc_to_posix(status, hw->aq.asq_last_status) : -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; } break; case I40E_NVMUPD_CSUM_LCB: /* Assumes the caller has acquired the nvm */ status = i40e_update_nvm_checksum(hw); if (status) { *perrno = hw->aq.asq_last_status ? i40e_aq_rc_to_posix(status, hw->aq.asq_last_status) : -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { hw->nvm_release_on_done = true; hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } break; default: i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: bad cmd %s in writing state.\n", i40e_nvm_update_state_str[upd_cmd]); status = -EOPNOTSUPP; *perrno = -ESRCH; break; } /* In some circumstances, a multi-write transaction takes longer * than the default 3 minute timeout on the write semaphore. If * the write failed with an EBUSY status, this is likely the problem, * so here we try to reacquire the semaphore then retry the write. * We only do one retry, then give up. 
*/ if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) && !retry_attempt) { u32 old_asq_status = hw->aq.asq_last_status; int old_status = status; u32 gtime; gtime = rd32(hw, I40E_GLVFGEN_TIMER); if (gtime >= hw->nvm.hw_semaphore_timeout) { i40e_debug(hw, I40E_DEBUG_ALL, "NVMUPD: write semaphore expired (%d >= %lld), retrying\n", gtime, hw->nvm.hw_semaphore_timeout); i40e_release_nvm(hw); status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { i40e_debug(hw, I40E_DEBUG_ALL, "NVMUPD: write semaphore reacquire failed aq_err = %d\n", hw->aq.asq_last_status); status = old_status; hw->aq.asq_last_status = old_asq_status; } else { retry_attempt = true; goto retry; } } } return status; } /** * i40e_nvmupd_clear_wait_state - clear wait state on hw * @hw: pointer to the hardware structure **/ void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw) { i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: clearing wait on opcode 0x%04x\n", hw->nvm_wait_opcode); if (hw->nvm_release_on_done) { i40e_release_nvm(hw); hw->nvm_release_on_done = false; } hw->nvm_wait_opcode = 0; if (hw->aq.arq_last_status) { hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR; return; } switch (hw->nvmupd_state) { case I40E_NVMUPD_STATE_INIT_WAIT: hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; break; case I40E_NVMUPD_STATE_WRITE_WAIT: hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; break; default: break; } } /** * i40e_nvmupd_check_wait_event - handle NVM update operation events * @hw: pointer to the hardware structure * @opcode: the event that just happened * @desc: AdminQ descriptor **/ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, struct i40e_aq_desc *desc) { u32 aq_desc_len = sizeof(struct i40e_aq_desc); if (opcode == hw->nvm_wait_opcode) { memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len); i40e_nvmupd_clear_wait_state(hw); } } /** * i40e_nvmupd_validate_command - Validate given command * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @perrno: pointer to return error code * * Return one of the valid command types or I40E_NVMUPD_INVALID **/ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, int *perrno) { enum i40e_nvmupd_cmd upd_cmd; u8 module, transaction; /* anything that doesn't match a recognized case is an error */ upd_cmd = I40E_NVMUPD_INVALID; transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); /* limits on data size */ if ((cmd->data_size < 1) || (cmd->data_size > I40E_NVMUPD_MAX_DATA)) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_validate_command data_size %d\n", cmd->data_size); *perrno = -EFAULT; return I40E_NVMUPD_INVALID; } switch (cmd->command) { case I40E_NVM_READ: switch (transaction) { case I40E_NVM_CON: upd_cmd = I40E_NVMUPD_READ_CON; break; case I40E_NVM_SNT: upd_cmd = I40E_NVMUPD_READ_SNT; break; case I40E_NVM_LCB: upd_cmd = I40E_NVMUPD_READ_LCB; break; case I40E_NVM_SA: upd_cmd = I40E_NVMUPD_READ_SA; break; case I40E_NVM_EXEC: if (module == 0xf) upd_cmd = I40E_NVMUPD_STATUS; else if (module == 0) upd_cmd = I40E_NVMUPD_GET_AQ_RESULT; break; case I40E_NVM_AQE: upd_cmd = I40E_NVMUPD_GET_AQ_EVENT; break; } break; case I40E_NVM_WRITE: switch (transaction) { case I40E_NVM_CON: upd_cmd = I40E_NVMUPD_WRITE_CON; break; case I40E_NVM_SNT: upd_cmd = I40E_NVMUPD_WRITE_SNT; break; case I40E_NVM_LCB: upd_cmd = I40E_NVMUPD_WRITE_LCB; break; case I40E_NVM_SA: upd_cmd = I40E_NVMUPD_WRITE_SA; break; case I40E_NVM_ERA: upd_cmd = I40E_NVMUPD_WRITE_ERA; break; case 
I40E_NVM_CSUM: upd_cmd = I40E_NVMUPD_CSUM_CON; break; case (I40E_NVM_CSUM|I40E_NVM_SA): upd_cmd = I40E_NVMUPD_CSUM_SA; break; case (I40E_NVM_CSUM|I40E_NVM_LCB): upd_cmd = I40E_NVMUPD_CSUM_LCB; break; case I40E_NVM_EXEC: if (module == 0) upd_cmd = I40E_NVMUPD_EXEC_AQ; break; } break; } return upd_cmd; } /** * i40e_nvmupd_exec_aq - Run an AQ command * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * cmd structure contains identifiers and data buffer **/ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { struct i40e_asq_cmd_details cmd_details; struct i40e_aq_desc *aq_desc; u32 buff_size = 0; u8 *buff = NULL; u32 aq_desc_len; u32 aq_data_len; int status; i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); if (cmd->offset == 0xffff) return 0; memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; aq_desc_len = sizeof(struct i40e_aq_desc); memset(&hw->nvm_wb_desc, 0, aq_desc_len); /* get the aq descriptor */ if (cmd->data_size < aq_desc_len) { i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n", cmd->data_size, aq_desc_len); *perrno = -EINVAL; return -EINVAL; } aq_desc = (struct i40e_aq_desc *)bytes; /* if data buffer needed, make sure it's ready */ aq_data_len = cmd->data_size - aq_desc_len; buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen)); if (buff_size) { if (!hw->nvm_buff.va) { status = i40e_allocate_virt_mem(hw, &hw->nvm_buff, hw->aq.asq_buf_size); if (status) i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n", status); } if (hw->nvm_buff.va) { buff = hw->nvm_buff.va; memcpy(buff, &bytes[aq_desc_len], aq_data_len); } } if (cmd->offset) memset(&hw->nvm_aq_event_desc, 0, aq_desc_len); /* and away we go! */ status = i40e_asq_send_command(hw, aq_desc, buff, buff_size, &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, "%s err %pe aq_err %s\n", __func__, ERR_PTR(status), i40e_aq_str(hw, hw->aq.asq_last_status)); *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); return status; } /* should we wait for a followup event? 
*/ if (cmd->offset) { hw->nvm_wait_opcode = cmd->offset; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } return status; } /** * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * cmd structure contains identifiers and data buffer **/ static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { u32 aq_total_len; u32 aq_desc_len; int remainder; u8 *buff; i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); aq_desc_len = sizeof(struct i40e_aq_desc); aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen); /* check offset range */ if (cmd->offset > aq_total_len) { i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n", __func__, cmd->offset, aq_total_len); *perrno = -EINVAL; return -EINVAL; } /* check copylength range */ if (cmd->data_size > (aq_total_len - cmd->offset)) { int new_len = aq_total_len - cmd->offset; i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n", __func__, cmd->data_size, new_len); cmd->data_size = new_len; } remainder = cmd->data_size; if (cmd->offset < aq_desc_len) { u32 len = aq_desc_len - cmd->offset; len = min(len, cmd->data_size); i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n", __func__, cmd->offset, cmd->offset + len); buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset; memcpy(bytes, buff, len); bytes += len; remainder -= len; buff = hw->nvm_buff.va; } else { buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len); } if (remainder > 0) { int start_byte = buff - (u8 *)hw->nvm_buff.va; i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n", __func__, start_byte, start_byte + remainder); memcpy(bytes, buff, remainder); } return 0; } /** * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * cmd structure contains identifiers and data buffer **/ static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { u32 aq_total_len; u32 aq_desc_len; i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); aq_desc_len = sizeof(struct i40e_aq_desc); aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen); /* check copylength range */ if (cmd->data_size > aq_total_len) { i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n", __func__, cmd->data_size, aq_total_len); cmd->data_size = aq_total_len; } memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size); return 0; } /** * i40e_nvmupd_nvm_read - Read NVM * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * cmd structure contains identifiers and data buffer **/ static int i40e_nvmupd_nvm_read(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { struct i40e_asq_cmd_details cmd_details; u8 module, transaction; int status; bool last; transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA); memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; status = i40e_aq_read_nvm(hw, module, cmd->offset, 
(u16)cmd->data_size, bytes, last, &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", module, cmd->offset, cmd->data_size); i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_read status %d aq %d\n", status, hw->aq.asq_last_status); *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } return status; } /** * i40e_nvmupd_nvm_erase - Erase an NVM module * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @perrno: pointer to return error code * * module, offset, data_size and data are in cmd structure **/ static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw, struct i40e_nvm_access *cmd, int *perrno) { struct i40e_asq_cmd_details cmd_details; u8 module, transaction; int status = 0; bool last; transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction & I40E_NVM_LCB); memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size, last, &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", module, cmd->offset, cmd->data_size); i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_erase status %d aq %d\n", status, hw->aq.asq_last_status); *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } return status; } /** * i40e_nvmupd_nvm_write - Write NVM * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer * @bytes: pointer to the data buffer * @perrno: pointer to return error code * * module, offset, data_size and data are in cmd structure **/ static int i40e_nvmupd_nvm_write(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno) { struct i40e_asq_cmd_details cmd_details; u8 module, transaction; u8 preservation_flags; int status = 0; bool last; transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction & I40E_NVM_LCB); preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config); memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; status = i40e_aq_update_nvm(hw, module, cmd->offset, (u16)cmd->data_size, bytes, last, preservation_flags, &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", module, cmd->offset, cmd->data_size); i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_write status %d aq %d\n", status, hw->aq.asq_last_status); *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } return status; }
linux-master
drivers/net/ethernet/intel/i40e/i40e_nvm.c
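/*
 * Example (not part of the driver): a standalone sketch of the sector-bounded
 * chunking performed by i40e_read_nvm_buffer_aq() above.  A single AQ read
 * may not fetch more than one 4KB Shadow RAM sector (2048 words) and may not
 * cross a sector boundary, so a larger request is split into chunks.  The
 * callback type and the demo in main() are assumptions made only for this
 * illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SR_SECTOR_WORDS 2048	/* one 4KB sector, counted in 16-bit words */

typedef int (*read_chunk_fn)(uint32_t offset, uint16_t words, bool last);

/* Split a word-granular read so that no chunk crosses a sector boundary. */
static int read_words_chunked(uint32_t offset, uint16_t words,
			      read_chunk_fn read_chunk)
{
	uint16_t done = 0;

	while (done < words) {
		/* Words remaining before the next sector boundary. */
		uint16_t in_sector = SR_SECTOR_WORDS -
				     (offset % SR_SECTOR_WORDS);
		uint16_t remaining = words - done;
		uint16_t chunk = remaining < in_sector ? remaining : in_sector;
		bool last = (done + chunk) >= words;
		int ret = read_chunk(offset, chunk, last);

		if (ret)
			return ret;
		done += chunk;
		offset += chunk;
	}
	return 0;
}

/* Demo callback: print the chunk layout the splitter produces. */
static int print_chunk(uint32_t offset, uint16_t words, bool last)
{
	printf("read %u words at offset 0x%x%s\n",
	       (unsigned)words, (unsigned)offset, last ? " (last)" : "");
	return 0;
}

int main(void)
{
	/* 5000 words starting 100 words before a sector boundary. */
	return read_words_chunked(SR_SECTOR_WORDS - 100, 5000, print_chunk);
}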
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include <linux/prefetch.h> #include <linux/bpf_trace.h> #include <net/mpls.h> #include <net/xdp.h> #include "i40e.h" #include "i40e_trace.h" #include "i40e_prototype.h" #include "i40e_txrx_common.h" #include "i40e_xsk.h" #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) /** * i40e_fdir - Generate a Flow Director descriptor based on fdata * @tx_ring: Tx ring to send buffer on * @fdata: Flow director filter data * @add: Indicate if we are adding a rule or deleting one * **/ static void i40e_fdir(struct i40e_ring *tx_ring, struct i40e_fdir_filter *fdata, bool add) { struct i40e_filter_program_desc *fdir_desc; struct i40e_pf *pf = tx_ring->vsi->back; u32 flex_ptype, dtype_cmd; u16 i; /* grab the next descriptor */ i = tx_ring->next_to_use; fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); i++; tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK & (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT); flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK & (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT); flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK & (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); /* Use LAN VSI Id if not programmed by user */ flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK & ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT); dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG; dtype_cmd |= add ? I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << I40E_TXD_FLTR_QW1_PCMD_SHIFT : I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << I40E_TXD_FLTR_QW1_PCMD_SHIFT; dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK & (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT); dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK & (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT); if (fdata->cnt_index) { dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK & ((u32)fdata->cnt_index << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT); } fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); fdir_desc->rsvd = cpu_to_le32(0); fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); fdir_desc->fd_id = cpu_to_le32(fdata->fd_id); } #define I40E_FD_CLEAN_DELAY 10 /** * i40e_program_fdir_filter - Program a Flow Director filter * @fdir_data: Packet data that will be filter parameters * @raw_packet: the pre-allocated packet buffer for FDir * @pf: The PF pointer * @add: True for add/update, False for remove **/ static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, struct i40e_pf *pf, bool add) { struct i40e_tx_buffer *tx_buf, *first; struct i40e_tx_desc *tx_desc; struct i40e_ring *tx_ring; struct i40e_vsi *vsi; struct device *dev; dma_addr_t dma; u32 td_cmd = 0; u16 i; /* find existing FDIR VSI */ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); if (!vsi) return -ENOENT; tx_ring = vsi->tx_rings[0]; dev = tx_ring->dev; /* we need two descriptors to add/del a filter and we can wait */ for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) { if (!i) return -EAGAIN; msleep_interruptible(1); } dma = dma_map_single(dev, raw_packet, I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) goto dma_fail; /* grab the next descriptor */ i = tx_ring->next_to_use; first = &tx_ring->tx_bi[i]; i40e_fdir(tx_ring, fdir_data, add); /* Now program a dummy descriptor */ i = tx_ring->next_to_use; tx_desc = I40E_TX_DESC(tx_ring, i); tx_buf = &tx_ring->tx_bi[i]; tx_ring->next_to_use = ((i + 1) < 
tx_ring->count) ? i + 1 : 0; memset(tx_buf, 0, sizeof(struct i40e_tx_buffer)); /* record length, and DMA address */ dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE); dma_unmap_addr_set(tx_buf, dma, dma); tx_desc->buffer_addr = cpu_to_le64(dma); td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY; tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB; tx_buf->raw_buf = (void *)raw_packet; tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0); /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. */ wmb(); /* Mark the data descriptor to be watched */ first->next_to_watch = tx_desc; writel(tx_ring->next_to_use, tx_ring->tail); return 0; dma_fail: return -1; } /** * i40e_create_dummy_packet - Constructs dummy packet for HW * @dummy_packet: preallocated space for dummy packet * @ipv4: is layer 3 packet of version 4 or 6 * @l4proto: next level protocol used in data portion of l3 * @data: filter data * * Returns address of layer 4 protocol dummy packet. **/ static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto, struct i40e_fdir_filter *data) { bool is_vlan = !!data->vlan_tag; struct vlan_hdr vlan = {}; struct ipv6hdr ipv6 = {}; struct ethhdr eth = {}; struct iphdr ip = {}; u8 *tmp; if (ipv4) { eth.h_proto = cpu_to_be16(ETH_P_IP); ip.protocol = l4proto; ip.version = 0x4; ip.ihl = 0x5; ip.daddr = data->dst_ip; ip.saddr = data->src_ip; } else { eth.h_proto = cpu_to_be16(ETH_P_IPV6); ipv6.nexthdr = l4proto; ipv6.version = 0x6; memcpy(&ipv6.saddr.in6_u.u6_addr32, data->src_ip6, sizeof(__be32) * 4); memcpy(&ipv6.daddr.in6_u.u6_addr32, data->dst_ip6, sizeof(__be32) * 4); } if (is_vlan) { vlan.h_vlan_TCI = data->vlan_tag; vlan.h_vlan_encapsulated_proto = eth.h_proto; eth.h_proto = data->vlan_etype; } tmp = dummy_packet; memcpy(tmp, &eth, sizeof(eth)); tmp += sizeof(eth); if (is_vlan) { memcpy(tmp, &vlan, sizeof(vlan)); tmp += sizeof(vlan); } if (ipv4) { memcpy(tmp, &ip, sizeof(ip)); tmp += sizeof(ip); } else { memcpy(tmp, &ipv6, sizeof(ipv6)); tmp += sizeof(ipv6); } return tmp; } /** * i40e_create_dummy_udp_packet - helper function to create UDP packet * @raw_packet: preallocated space for dummy packet * @ipv4: is layer 3 packet of version 4 or 6 * @l4proto: next level protocol used in data portion of l3 * @data: filter data * * Helper function to populate udp fields. **/ static void i40e_create_dummy_udp_packet(u8 *raw_packet, bool ipv4, u8 l4proto, struct i40e_fdir_filter *data) { struct udphdr *udp; u8 *tmp; tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_UDP, data); udp = (struct udphdr *)(tmp); udp->dest = data->dst_port; udp->source = data->src_port; } /** * i40e_create_dummy_tcp_packet - helper function to create TCP packet * @raw_packet: preallocated space for dummy packet * @ipv4: is layer 3 packet of version 4 or 6 * @l4proto: next level protocol used in data portion of l3 * @data: filter data * * Helper function to populate tcp fields. 
**/ static void i40e_create_dummy_tcp_packet(u8 *raw_packet, bool ipv4, u8 l4proto, struct i40e_fdir_filter *data) { struct tcphdr *tcp; u8 *tmp; /* Dummy tcp packet */ static const char tcp_packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x50, 0x11, 0x0, 0x72, 0, 0, 0, 0}; tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_TCP, data); tcp = (struct tcphdr *)tmp; memcpy(tcp, tcp_packet, sizeof(tcp_packet)); tcp->dest = data->dst_port; tcp->source = data->src_port; } /** * i40e_create_dummy_sctp_packet - helper function to create SCTP packet * @raw_packet: preallocated space for dummy packet * @ipv4: is layer 3 packet of version 4 or 6 * @l4proto: next level protocol used in data portion of l3 * @data: filter data * * Helper function to populate sctp fields. **/ static void i40e_create_dummy_sctp_packet(u8 *raw_packet, bool ipv4, u8 l4proto, struct i40e_fdir_filter *data) { struct sctphdr *sctp; u8 *tmp; tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_SCTP, data); sctp = (struct sctphdr *)tmp; sctp->dest = data->dst_port; sctp->source = data->src_port; } /** * i40e_prepare_fdir_filter - Prepare and program fdir filter * @pf: physical function to attach filter to * @fd_data: filter data * @add: add or delete filter * @packet_addr: address of dummy packet, used in filtering * @payload_offset: offset from dummy packet address to user defined data * @pctype: Packet type for which filter is used * * Helper function to offset data of dummy packet, program it and * handle errors. **/ static int i40e_prepare_fdir_filter(struct i40e_pf *pf, struct i40e_fdir_filter *fd_data, bool add, char *packet_addr, int payload_offset, u8 pctype) { int ret; if (fd_data->flex_filter) { u8 *payload; __be16 pattern = fd_data->flex_word; u16 off = fd_data->flex_offset; payload = packet_addr + payload_offset; /* If user provided vlan, offset payload by vlan header length */ if (!!fd_data->vlan_tag) payload += VLAN_HLEN; *((__force __be16 *)(payload + off)) = pattern; } fd_data->pctype = pctype; ret = i40e_program_fdir_filter(fd_data, packet_addr, pf, add); if (ret) { dev_info(&pf->pdev->dev, "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", fd_data->pctype, fd_data->fd_id, ret); /* Free the packet buffer since it wasn't added to the ring */ return -EOPNOTSUPP; } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { if (add) dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n", fd_data->pctype, fd_data->fd_id); else dev_info(&pf->pdev->dev, "Filter deleted for PCTYPE %d loc = %d\n", fd_data->pctype, fd_data->fd_id); } return ret; } /** * i40e_change_filter_num - Update the per-protocol filter counters * @ipv4: is layer 3 packet of version 4 or 6 * @add: add or delete filter * @ipv4_filter_num: field to update * @ipv6_filter_num: field to update * * Update filter number field for pf.
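 *
 * For example, i40e_add_del_fdir_udp() passes &pf->fd_udp4_filter_cnt
 * and &pf->fd_udp6_filter_cnt, so this helper only has to pick which of
 * the two counters to increment or decrement.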
**/ static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num, u16 *ipv6_filter_num) { if (add) { if (ipv4) (*ipv4_filter_num)++; else (*ipv6_filter_num)++; } else { if (ipv4) (*ipv4_filter_num)--; else (*ipv6_filter_num)--; } } #define I40E_UDPIP_DUMMY_PACKET_LEN 42 #define I40E_UDPIP6_DUMMY_PACKET_LEN 62 /** * i40e_add_del_fdir_udp - Add/Remove UDP filters * @vsi: pointer to the targeted VSI * @fd_data: the flow director data required for the FDir descriptor * @add: true adds a filter, false removes it * @ipv4: true is v4, false is v6 * * Returns 0 if the filters were successfully added or removed **/ static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi, struct i40e_fdir_filter *fd_data, bool add, bool ipv4) { struct i40e_pf *pf = vsi->back; u8 *raw_packet; int ret; raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); if (!raw_packet) return -ENOMEM; i40e_create_dummy_udp_packet(raw_packet, ipv4, IPPROTO_UDP, fd_data); if (ipv4) ret = i40e_prepare_fdir_filter (pf, fd_data, add, raw_packet, I40E_UDPIP_DUMMY_PACKET_LEN, I40E_FILTER_PCTYPE_NONF_IPV4_UDP); else ret = i40e_prepare_fdir_filter (pf, fd_data, add, raw_packet, I40E_UDPIP6_DUMMY_PACKET_LEN, I40E_FILTER_PCTYPE_NONF_IPV6_UDP); if (ret) { kfree(raw_packet); return ret; } i40e_change_filter_num(ipv4, add, &pf->fd_udp4_filter_cnt, &pf->fd_udp6_filter_cnt); return 0; } #define I40E_TCPIP_DUMMY_PACKET_LEN 54 #define I40E_TCPIP6_DUMMY_PACKET_LEN 74 /** * i40e_add_del_fdir_tcp - Add/Remove TCPv4 filters * @vsi: pointer to the targeted VSI * @fd_data: the flow director data required for the FDir descriptor * @add: true adds a filter, false removes it * @ipv4: true is v4, false is v6 * * Returns 0 if the filters were successfully added or removed **/ static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi, struct i40e_fdir_filter *fd_data, bool add, bool ipv4) { struct i40e_pf *pf = vsi->back; u8 *raw_packet; int ret; raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); if (!raw_packet) return -ENOMEM; i40e_create_dummy_tcp_packet(raw_packet, ipv4, IPPROTO_TCP, fd_data); if (ipv4) ret = i40e_prepare_fdir_filter (pf, fd_data, add, raw_packet, I40E_TCPIP_DUMMY_PACKET_LEN, I40E_FILTER_PCTYPE_NONF_IPV4_TCP); else ret = i40e_prepare_fdir_filter (pf, fd_data, add, raw_packet, I40E_TCPIP6_DUMMY_PACKET_LEN, I40E_FILTER_PCTYPE_NONF_IPV6_TCP); if (ret) { kfree(raw_packet); return ret; } i40e_change_filter_num(ipv4, add, &pf->fd_tcp4_filter_cnt, &pf->fd_tcp6_filter_cnt); if (add) { if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); } return 0; } #define I40E_SCTPIP_DUMMY_PACKET_LEN 46 #define I40E_SCTPIP6_DUMMY_PACKET_LEN 66 /** * i40e_add_del_fdir_sctp - Add/Remove SCTPv4 Flow Director filters for * a specific flow spec * @vsi: pointer to the targeted VSI * @fd_data: the flow director data required for the FDir descriptor * @add: true adds a filter, false removes it * @ipv4: true is v4, false is v6 * * Returns 0 if the filters were successfully added or removed **/ static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi, struct i40e_fdir_filter *fd_data, bool add, bool ipv4) { struct i40e_pf *pf = vsi->back; u8 *raw_packet; int ret; raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); if (!raw_packet) return -ENOMEM; i40e_create_dummy_sctp_packet(raw_packet, ipv4, IPPROTO_SCTP, fd_data); if (ipv4) ret = i40e_prepare_fdir_filter 
(pf, fd_data, add, raw_packet, I40E_SCTPIP_DUMMY_PACKET_LEN, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP); else ret = i40e_prepare_fdir_filter (pf, fd_data, add, raw_packet, I40E_SCTPIP6_DUMMY_PACKET_LEN, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP); if (ret) { kfree(raw_packet); return ret; } i40e_change_filter_num(ipv4, add, &pf->fd_sctp4_filter_cnt, &pf->fd_sctp6_filter_cnt); return 0; } #define I40E_IP_DUMMY_PACKET_LEN 34 #define I40E_IP6_DUMMY_PACKET_LEN 54 /** * i40e_add_del_fdir_ip - Add/Remove IPv4 Flow Director filters for * a specific flow spec * @vsi: pointer to the targeted VSI * @fd_data: the flow director data required for the FDir descriptor * @add: true adds a filter, false removes it * @ipv4: true is v4, false is v6 * * Returns 0 if the filters were successfully added or removed **/ static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi, struct i40e_fdir_filter *fd_data, bool add, bool ipv4) { struct i40e_pf *pf = vsi->back; int payload_offset; u8 *raw_packet; int iter_start; int iter_end; int ret; int i; if (ipv4) { iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4; } else { iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER; iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6; } for (i = iter_start; i <= iter_end; i++) { raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); if (!raw_packet) return -ENOMEM; /* IPv6 no header option differs from IPv4 */ (void)i40e_create_dummy_packet (raw_packet, ipv4, (ipv4) ? IPPROTO_IP : IPPROTO_NONE, fd_data); payload_offset = (ipv4) ? I40E_IP_DUMMY_PACKET_LEN : I40E_IP6_DUMMY_PACKET_LEN; ret = i40e_prepare_fdir_filter(pf, fd_data, add, raw_packet, payload_offset, i); if (ret) goto err; } i40e_change_filter_num(ipv4, add, &pf->fd_ip4_filter_cnt, &pf->fd_ip6_filter_cnt); return 0; err: kfree(raw_packet); return ret; } /** * i40e_add_del_fdir - Build raw packets to add/del fdir filter * @vsi: pointer to the targeted VSI * @input: filter to add or delete * @add: true adds a filter, false removes it * **/ int i40e_add_del_fdir(struct i40e_vsi *vsi, struct i40e_fdir_filter *input, bool add) { enum ip_ver { ipv6 = 0, ipv4 = 1 }; struct i40e_pf *pf = vsi->back; int ret; switch (input->flow_type & ~FLOW_EXT) { case TCP_V4_FLOW: ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4); break; case UDP_V4_FLOW: ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4); break; case SCTP_V4_FLOW: ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4); break; case TCP_V6_FLOW: ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6); break; case UDP_V6_FLOW: ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6); break; case SCTP_V6_FLOW: ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6); break; case IP_USER_FLOW: switch (input->ipl4_proto) { case IPPROTO_TCP: ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4); break; case IPPROTO_UDP: ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4); break; case IPPROTO_SCTP: ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4); break; case IPPROTO_IP: ret = i40e_add_del_fdir_ip(vsi, input, add, ipv4); break; default: /* We cannot support masking based on protocol */ dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n", input->ipl4_proto); return -EINVAL; } break; case IPV6_USER_FLOW: switch (input->ipl4_proto) { case IPPROTO_TCP: ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6); break; case IPPROTO_UDP: ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6); break; case IPPROTO_SCTP: ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6); break; case IPPROTO_IP: ret = i40e_add_del_fdir_ip(vsi, input, add, ipv6); break; 
default: /* We cannot support masking based on protocol */ dev_info(&pf->pdev->dev, "Unsupported IPv6 protocol 0x%02x\n", input->ipl4_proto); return -EINVAL; } break; default: dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n", input->flow_type); return -EINVAL; } /* The buffer allocated here will be normally be freed by * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit * completion. In the event of an error adding the buffer to the FDIR * ring, it will immediately be freed. It may also be freed by * i40e_clean_tx_ring() when closing the VSI. */ return ret; } /** * i40e_fd_handle_status - check the Programming Status for FD * @rx_ring: the Rx ring for this descriptor * @qword0_raw: qword0 * @qword1: qword1 after le_to_cpu * @prog_id: the id originally used for programming * * This is used to verify if the FD programming or invalidation * requested by SW to the HW is successful or not and take actions accordingly. **/ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw, u64 qword1, u8 prog_id) { struct i40e_pf *pf = rx_ring->vsi->back; struct pci_dev *pdev = pf->pdev; struct i40e_16b_rx_wb_qw0 *qw0; u32 fcnt_prog, fcnt_avail; u32 error; qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw; error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id); if (qw0->hi_dword.fd_id != 0 || (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", pf->fd_inv); /* Check if the programming error is for ATR. * If so, auto disable ATR and set a state for * flush in progress. Next time we come here if flush is in * progress do nothing, once flush is complete the state will * be cleared. */ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) return; pf->fd_add_err++; /* store the current atr filter count */ pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); if (qw0->hi_dword.fd_id == 0 && test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) { /* These set_bit() calls aren't atomic with the * test_bit() here, but worse case we potentially * disable ATR and queue a flush right after SB * support is re-enabled. That shouldn't cause an * issue in practice */ set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); } /* filter programming failed most likely due to table full */ fcnt_prog = i40e_get_global_fd_count(pf); fcnt_avail = pf->fdir_pf_filter_count; /* If ATR is running fcnt_prog can quickly change, * if we are very close to full, it makes sense to disable * FD ATR/SB and then re-enable it when there is room. 
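 *
 * Concretely, the check below blocks new sideband rules once
 * fcnt_prog >= fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN; the margin
 * leaves headroom for ATR filters that may still be in flight when the
 * count is sampled.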
*/ if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); } } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n", qw0->hi_dword.fd_id); } } /** * i40e_unmap_and_free_tx_resource - Release a Tx buffer * @ring: the ring that owns the buffer * @tx_buffer: the buffer to free **/ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, struct i40e_tx_buffer *tx_buffer) { if (tx_buffer->skb) { if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) kfree(tx_buffer->raw_buf); else if (ring_is_xdp(ring)) xdp_return_frame(tx_buffer->xdpf); else dev_kfree_skb_any(tx_buffer->skb); if (dma_unmap_len(tx_buffer, len)) dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); } else if (dma_unmap_len(tx_buffer, len)) { dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); } tx_buffer->next_to_watch = NULL; tx_buffer->skb = NULL; dma_unmap_len_set(tx_buffer, len, 0); /* tx_buffer must be completely set up in the transmit path */ } /** * i40e_clean_tx_ring - Free any empty Tx buffers * @tx_ring: ring to be cleaned **/ void i40e_clean_tx_ring(struct i40e_ring *tx_ring) { unsigned long bi_size; u16 i; if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { i40e_xsk_clean_tx_ring(tx_ring); } else { /* ring already cleared, nothing to do */ if (!tx_ring->tx_bi) return; /* Free all the Tx ring sk_buffs */ for (i = 0; i < tx_ring->count; i++) i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); } bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; memset(tx_ring->tx_bi, 0, bi_size); /* Zero out the descriptor ring */ memset(tx_ring->desc, 0, tx_ring->size); tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; if (!tx_ring->netdev) return; /* cleanup Tx queue statistics */ netdev_tx_reset_queue(txring_txq(tx_ring)); } /** * i40e_free_tx_resources - Free Tx resources per queue * @tx_ring: Tx descriptor ring for a specific queue * * Free all transmit software resources **/ void i40e_free_tx_resources(struct i40e_ring *tx_ring) { i40e_clean_tx_ring(tx_ring); kfree(tx_ring->tx_bi); tx_ring->tx_bi = NULL; if (tx_ring->desc) { dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; } } /** * i40e_get_tx_pending - how many tx descriptors not processed * @ring: the ring of descriptors * @in_sw: use SW variables * * Since there is no access to the ring head register * in XL710, we need to use our local copies **/ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw) { u32 head, tail; if (!in_sw) { head = i40e_get_head(ring); tail = readl(ring->tail); } else { head = ring->next_to_clean; tail = ring->next_to_use; } if (head != tail) return (head < tail) ? tail - head : (tail + ring->count - head); return 0; } /** * i40e_detect_recover_hung - Function to detect and recover hung_queues * @vsi: pointer to vsi struct with tx queues * * VSI has netdev and netdev has TX queues. This function is to check each of * those TX queues if they are hung, trigger recovery by issuing SW interrupt. 
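 *
 * The heuristic implemented below: if a ring's completed packet count
 * has not moved since the previous scan while work was still pending,
 * the queue is assumed stuck and i40e_force_wb() raises a SW interrupt
 * so the clean routine gets to run; prev_pkt_ctr is parked at -1 when
 * the ring is idle so an idle queue is never flagged as hung.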
**/ void i40e_detect_recover_hung(struct i40e_vsi *vsi) { struct i40e_ring *tx_ring = NULL; struct net_device *netdev; unsigned int i; int packets; if (!vsi) return; if (test_bit(__I40E_VSI_DOWN, vsi->state)) return; netdev = vsi->netdev; if (!netdev) return; if (!netif_carrier_ok(netdev)) return; for (i = 0; i < vsi->num_queue_pairs; i++) { tx_ring = vsi->tx_rings[i]; if (tx_ring && tx_ring->desc) { /* If packet counter has not changed the queue is * likely stalled, so force an interrupt for this * queue. * * prev_pkt_ctr would be negative if there was no * pending work. */ packets = tx_ring->stats.packets & INT_MAX; if (tx_ring->tx_stats.prev_pkt_ctr == packets) { i40e_force_wb(vsi, tx_ring->q_vector); continue; } /* Memory barrier between read of packet count and call * to i40e_get_tx_pending() */ smp_rmb(); tx_ring->tx_stats.prev_pkt_ctr = i40e_get_tx_pending(tx_ring, true) ? packets : -1; } } } /** * i40e_clean_tx_irq - Reclaim resources after transmit completes * @vsi: the VSI we care about * @tx_ring: Tx ring to clean * @napi_budget: Used to determine if we are in netpoll * @tx_cleaned: Out parameter set to the number of TXes cleaned * * Returns true if there's any budget left (e.g. the clean is finished) **/ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring, int napi_budget, unsigned int *tx_cleaned) { int i = tx_ring->next_to_clean; struct i40e_tx_buffer *tx_buf; struct i40e_tx_desc *tx_head; struct i40e_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0; unsigned int budget = vsi->work_limit; tx_buf = &tx_ring->tx_bi[i]; tx_desc = I40E_TX_DESC(tx_ring, i); i -= tx_ring->count; tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); do { struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; /* if next_to_watch is not set then there is no work pending */ if (!eop_desc) break; /* prevent any other reads prior to eop_desc */ smp_rmb(); i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* we have caught up to head, no work left to do */ if (tx_head == tx_desc) break; /* clear next_to_watch to prevent false hangs */ tx_buf->next_to_watch = NULL; /* update the statistics for this packet */ total_bytes += tx_buf->bytecount; total_packets += tx_buf->gso_segs; /* free the skb/XDP data */ if (ring_is_xdp(tx_ring)) xdp_return_frame(tx_buf->xdpf); else napi_consume_skb(tx_buf->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buf, dma), dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); /* clear tx_buffer data */ tx_buf->skb = NULL; dma_unmap_len_set(tx_buf, len, 0); /* unmap remaining buffers */ while (tx_desc != eop_desc) { i40e_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf); tx_buf++; tx_desc++; i++; if (unlikely(!i)) { i -= tx_ring->count; tx_buf = tx_ring->tx_bi; tx_desc = I40E_TX_DESC(tx_ring, 0); } /* unmap any remaining paged data */ if (dma_unmap_len(tx_buf, len)) { dma_unmap_page(tx_ring->dev, dma_unmap_addr(tx_buf, dma), dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); dma_unmap_len_set(tx_buf, len, 0); } } /* move us one more past the eop_desc for start of next pkt */ tx_buf++; tx_desc++; i++; if (unlikely(!i)) { i -= tx_ring->count; tx_buf = tx_ring->tx_bi; tx_desc = I40E_TX_DESC(tx_ring, 0); } prefetch(tx_desc); /* update budget accounting */ budget--; } while (likely(budget)); i += tx_ring->count; tx_ring->next_to_clean = i; i40e_update_tx_stats(tx_ring, total_packets, total_bytes); i40e_arm_wb(tx_ring, vsi, budget); if (ring_is_xdp(tx_ring)) return !!budget; /* notify netdev of completed 
buffers */ netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && !test_bit(__I40E_VSI_DOWN, vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; } } *tx_cleaned = total_packets; return !!budget; } /** * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled * @vsi: the VSI we care about * @q_vector: the vector on which to enable writeback * **/ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { u16 flags = q_vector->tx.ring[0].flags; u32 val; if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR)) return; if (q_vector->arm_wb_state) return; if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK | I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */ wr32(&vsi->back->hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val); } else { val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK | I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */ wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); } q_vector->arm_wb_state = true; } /** * i40e_force_wb - Issue SW Interrupt so HW does a wb * @vsi: the VSI we care about * @q_vector: the vector on which to force writeback * **/ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; /* allow 00 to be written to the index */ wr32(&vsi->back->hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val); } else { u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK; /* allow 00 to be written to the index */ wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); } } static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector, struct i40e_ring_container *rc) { return &q_vector->rx == rc; } static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector) { unsigned int divisor; switch (q_vector->vsi->back->hw.phy.link_info.link_speed) { case I40E_LINK_SPEED_40GB: divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024; break; case I40E_LINK_SPEED_25GB: case I40E_LINK_SPEED_20GB: divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512; break; default: case I40E_LINK_SPEED_10GB: divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256; break; case I40E_LINK_SPEED_1GB: case I40E_LINK_SPEED_100MB: divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32; break; } return divisor; } /** * i40e_update_itr - update the dynamic ITR value based on statistics * @q_vector: structure containing interrupt and ring information * @rc: structure containing ring performance data * * Stores a new ITR value based on packets and byte * counts during the last interrupt. The advantage of per interrupt * computation is faster updates and more accurate ITR for the current * traffic pattern. Constants in this function were computed * based on theoretical maximum wire speed and thresholds were set based * on testing data as well as attempting to minimize response time * while increasing bulk throughput. 
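 *
 * As a rough map of the thresholds used below: fewer than 4 packets per
 * interrupt is treated as a latency-sensitive trickle, fewer than 56
 * packets nudges the delay up by one fixed increment, 56-112 packets is
 * considered "just right" and the delay is held, 113-256 packets halves
 * the delay, and anything above 256 packets switches to bulk mode where
 * the delay is derived from the average wire size of the frames.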
**/ static void i40e_update_itr(struct i40e_q_vector *q_vector, struct i40e_ring_container *rc) { unsigned int avg_wire_size, packets, bytes, itr; unsigned long next_update = jiffies; /* If we don't have any rings just leave ourselves set for maximum * possible latency so we take ourselves out of the equation. */ if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting)) return; /* For Rx we want to push the delay up and default to low latency. * for Tx we want to pull the delay down and default to high latency. */ itr = i40e_container_is_rx(q_vector, rc) ? I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY : I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY; /* If we didn't update within up to 1 - 2 jiffies we can assume * that either packets are coming in so slow there hasn't been * any work, or that there is so much work that NAPI is dealing * with interrupt moderation and we don't need to do anything. */ if (time_after(next_update, rc->next_update)) goto clear_counts; /* If itr_countdown is set it means we programmed an ITR within * the last 4 interrupt cycles. This has a side effect of us * potentially firing an early interrupt. In order to work around * this we need to throw out any data received for a few * interrupts following the update. */ if (q_vector->itr_countdown) { itr = rc->target_itr; goto clear_counts; } packets = rc->total_packets; bytes = rc->total_bytes; if (i40e_container_is_rx(q_vector, rc)) { /* If Rx there are 1 to 4 packets and bytes are less than * 9000 assume insufficient data to use bulk rate limiting * approach unless Tx is already in bulk rate limiting. We * are likely latency driven. */ if (packets && packets < 4 && bytes < 9000 && (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) { itr = I40E_ITR_ADAPTIVE_LATENCY; goto adjust_by_size; } } else if (packets < 4) { /* If we have Tx and Rx ITR maxed and Tx ITR is running in * bulk mode and we are receiving 4 or fewer packets just * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so * that the Rx can relax. */ if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS && (q_vector->rx.target_itr & I40E_ITR_MASK) == I40E_ITR_ADAPTIVE_MAX_USECS) goto clear_counts; } else if (packets > 32) { /* If we have processed over 32 packets in a single interrupt * for Tx assume we need to switch over to "bulk" mode. */ rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY; } /* We have no packets to actually measure against. This means * either one of the other queues on this vector is active or * we are a Tx queue doing TSO with too high of an interrupt rate. * * Between 4 and 56 we can assume that our current interrupt delay * is only slightly too low. As such we should increase it by a small * fixed amount. */ if (packets < 56) { itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC; if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { itr &= I40E_ITR_ADAPTIVE_LATENCY; itr += I40E_ITR_ADAPTIVE_MAX_USECS; } goto clear_counts; } if (packets <= 256) { itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); itr &= I40E_ITR_MASK; /* Between 56 and 112 is our "goldilocks" zone where we are * working out "just right". Just report that our current * ITR is good for us. */ if (packets <= 112) goto clear_counts; /* If packet count is 128 or greater we are likely looking * at a slight overrun of the delay we want. Try halving * our delay to see if that will cut the number of packets * in half per interrupt. 
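 *
 * For example, a ring currently running at a 50 usec delay that saw
 * ~200 packets in one interrupt is cut to 25 usecs, clamped at
 * I40E_ITR_ADAPTIVE_MIN_USECS if that would fall below the floor.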
*/ itr /= 2; itr &= I40E_ITR_MASK; if (itr < I40E_ITR_ADAPTIVE_MIN_USECS) itr = I40E_ITR_ADAPTIVE_MIN_USECS; goto clear_counts; } /* The paths below assume we are dealing with a bulk ITR since * number of packets is greater than 256. We are just going to have * to compute a value and try to bring the count under control, * though for smaller packet sizes there isn't much we can do as * NAPI polling will likely be kicking in sooner rather than later. */ itr = I40E_ITR_ADAPTIVE_BULK; adjust_by_size: /* If packet counts are 256 or greater we can assume we have a gross * overestimation of what the rate should be. Instead of trying to fine * tune it just use the formula below to try and dial in an exact value * give the current packet size of the frame. */ avg_wire_size = bytes / packets; /* The following is a crude approximation of: * wmem_default / (size + overhead) = desired_pkts_per_int * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value * * Assuming wmem_default is 212992 and overhead is 640 bytes per * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the * formula down to * * (170 * (size + 24)) / (size + 640) = ITR * * We first do some math on the packet size and then finally bitshift * by 8 after rounding up. We also have to account for PCIe link speed * difference as ITR scales based on this. */ if (avg_wire_size <= 60) { /* Start at 250k ints/sec */ avg_wire_size = 4096; } else if (avg_wire_size <= 380) { /* 250K ints/sec to 60K ints/sec */ avg_wire_size *= 40; avg_wire_size += 1696; } else if (avg_wire_size <= 1084) { /* 60K ints/sec to 36K ints/sec */ avg_wire_size *= 15; avg_wire_size += 11452; } else if (avg_wire_size <= 1980) { /* 36K ints/sec to 30K ints/sec */ avg_wire_size *= 5; avg_wire_size += 22420; } else { /* plateau at a limit of 30K ints/sec */ avg_wire_size = 32256; } /* If we are in low latency mode halve our delay which doubles the * rate to somewhere between 100K to 16K ints/sec */ if (itr & I40E_ITR_ADAPTIVE_LATENCY) avg_wire_size /= 2; /* Resultant value is 256 times larger than it needs to be. This * gives us room to adjust the value as needed to either increase * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. * * Use addition as we have already recorded the new latency flag * for the ITR value. */ itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) * I40E_ITR_ADAPTIVE_MIN_INC; if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { itr &= I40E_ITR_ADAPTIVE_LATENCY; itr += I40E_ITR_ADAPTIVE_MAX_USECS; } clear_counts: /* write back value */ rc->target_itr = itr; /* next update should occur within next jiffy */ rc->next_update = next_update + 1; rc->total_bytes = 0; rc->total_packets = 0; } static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx) { return &rx_ring->rx_bi[idx]; } /** * i40e_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: rx descriptor ring to store buffers on * @old_buff: donor buffer to have page reused * * Synchronizes page for reuse by the adapter **/ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, struct i40e_rx_buffer *old_buff) { struct i40e_rx_buffer *new_buff; u16 nta = rx_ring->next_to_alloc; new_buff = i40e_rx_bi(rx_ring, nta); /* update, and store next to alloc */ nta++; rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; /* transfer page from old buffer to new buffer */ new_buff->dma = old_buff->dma; new_buff->page = old_buff->page; new_buff->page_offset = old_buff->page_offset; new_buff->pagecnt_bias = old_buff->pagecnt_bias; /* clear contents of buffer_info */ old_buff->page = NULL; } /** * i40e_clean_programming_status - clean the programming status descriptor * @rx_ring: the rx ring that has this descriptor * @qword0_raw: qword0 * @qword1: qword1 representing status_error_len in CPU ordering * * Flow director should handle FD_FILTER_STATUS to check its filter programming * status being successful or not and take actions accordingly. FCoE should * handle its context/filter programming/invalidation status and take actions. * * Returns an i40e_rx_buffer to reuse if the cleanup occurred, otherwise NULL. **/ void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw, u64 qword1) { u8 id; id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id); } /** * i40e_setup_tx_descriptors - Allocate the Tx descriptors * @tx_ring: the tx ring to set up * * Return 0 on success, negative on error **/ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) { struct device *dev = tx_ring->dev; int bi_size; if (!dev) return -ENOMEM; /* warn if we are about to overwrite the pointer */ WARN_ON(tx_ring->tx_bi); bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); if (!tx_ring->tx_bi) goto err; u64_stats_init(&tx_ring->syncp); /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); /* add u32 for head writeback, align after this takes care of * guaranteeing this is at least one cache line in size */ tx_ring->size += sizeof(u32); tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) { dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", tx_ring->size); goto err; } tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; tx_ring->tx_stats.prev_pkt_ctr = -1; return 0; err: kfree(tx_ring->tx_bi); tx_ring->tx_bi = NULL; return -ENOMEM; } static void i40e_clear_rx_bi(struct i40e_ring *rx_ring) { memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count); } /** * i40e_clean_rx_ring - Free Rx buffers * @rx_ring: ring to be cleaned **/ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) { u16 i; /* ring already cleared, nothing to do */ if (!rx_ring->rx_bi) return; if (rx_ring->xsk_pool) { i40e_xsk_clean_rx_ring(rx_ring); goto skip_free; } /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i); if (!rx_bi->page) continue; /* Invalidate cache lines that may have been written to by * device so that we avoid corrupting memory. 
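 *
 * The sync below only needs to cover the rx_buf_len window that was
 * handed to hardware; the full page is then unmapped with
 * I40E_RX_DMA_ATTR so the streaming mapping is torn down before the
 * page reference is dropped.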
*/ dma_sync_single_range_for_cpu(rx_ring->dev, rx_bi->dma, rx_bi->page_offset, rx_ring->rx_buf_len, DMA_FROM_DEVICE); /* free resources associated with mapping */ dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, i40e_rx_pg_size(rx_ring), DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); rx_bi->page = NULL; rx_bi->page_offset = 0; } skip_free: if (rx_ring->xsk_pool) i40e_clear_rx_bi_zc(rx_ring); else i40e_clear_rx_bi(rx_ring); /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_process = 0; rx_ring->next_to_use = 0; } /** * i40e_free_rx_resources - Free Rx resources * @rx_ring: ring to clean the resources from * * Free all receive software resources **/ void i40e_free_rx_resources(struct i40e_ring *rx_ring) { i40e_clean_rx_ring(rx_ring); if (rx_ring->vsi->type == I40E_VSI_MAIN) xdp_rxq_info_unreg(&rx_ring->xdp_rxq); rx_ring->xdp_prog = NULL; kfree(rx_ring->rx_bi); rx_ring->rx_bi = NULL; if (rx_ring->desc) { dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } } /** * i40e_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) { struct device *dev = rx_ring->dev; int err; u64_stats_init(&rx_ring->syncp); /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) { dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", rx_ring->size); return -ENOMEM; } rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_process = 0; rx_ring->next_to_use = 0; /* XDP RX-queue info only needed for RX rings exposed to XDP */ if (rx_ring->vsi->type == I40E_VSI_MAIN) { err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->queue_index, rx_ring->q_vector->napi.napi_id); if (err < 0) return err; } rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; rx_ring->rx_bi = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL); if (!rx_ring->rx_bi) return -ENOMEM; return 0; } /** * i40e_release_rx_desc - Store the new tail and head values * @rx_ring: ring to bump * @val: new head index **/ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; /* update next to alloc since we have filled the ring */ rx_ring->next_to_alloc = val; /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). */ wmb(); writel(val, rx_ring->tail); } #if (PAGE_SIZE >= 8192) static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring, unsigned int size) { unsigned int truesize; truesize = rx_ring->rx_offset ? SKB_DATA_ALIGN(size + rx_ring->rx_offset) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : SKB_DATA_ALIGN(size); return truesize; } #endif /** * i40e_alloc_mapped_page - recycle or make a new page * @rx_ring: ring to use * @bi: rx_buffer struct to modify * * Returns true if the page was successfully allocated or * reused. 
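 *
 * Reference counting note: a freshly mapped page gets its refcount
 * bumped by USHRT_MAX - 1 and pagecnt_bias set to USHRT_MAX, so buffers
 * can be handed to the stack by decrementing the bias instead of
 * touching the atomic page refcount for every frame; the outstanding
 * bias is folded back in via __page_frag_cache_drain() when the page is
 * finally released.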
**/ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, struct i40e_rx_buffer *bi) { struct page *page = bi->page; dma_addr_t dma; /* since we are recycling buffers we should seldom need to alloc */ if (likely(page)) { rx_ring->rx_stats.page_reuse_count++; return true; } /* alloc new page for storage */ page = dev_alloc_pages(i40e_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_page_failed++; return false; } rx_ring->rx_stats.page_alloc_count++; /* map page for use */ dma = dma_map_page_attrs(rx_ring->dev, page, 0, i40e_rx_pg_size(rx_ring), DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use */ if (dma_mapping_error(rx_ring->dev, dma)) { __free_pages(page, i40e_rx_pg_order(rx_ring)); rx_ring->rx_stats.alloc_page_failed++; return false; } bi->dma = dma; bi->page = page; bi->page_offset = rx_ring->rx_offset; page_ref_add(page, USHRT_MAX - 1); bi->pagecnt_bias = USHRT_MAX; return true; } /** * i40e_alloc_rx_buffers - Replace used receive buffers * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace * * Returns false if all allocations were successful, true if any fail **/ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 ntu = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; struct i40e_rx_buffer *bi; /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) return false; rx_desc = I40E_RX_DESC(rx_ring, ntu); bi = i40e_rx_bi(rx_ring, ntu); do { if (!i40e_alloc_mapped_page(rx_ring, bi)) goto no_buffers; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, bi->dma, bi->page_offset, rx_ring->rx_buf_len, DMA_FROM_DEVICE); /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); rx_desc++; bi++; ntu++; if (unlikely(ntu == rx_ring->count)) { rx_desc = I40E_RX_DESC(rx_ring, 0); bi = i40e_rx_bi(rx_ring, 0); ntu = 0; } /* clear the status bits for the next_to_use descriptor */ rx_desc->wb.qword1.status_error_len = 0; cleaned_count--; } while (cleaned_count); if (rx_ring->next_to_use != ntu) i40e_release_rx_desc(rx_ring, ntu); return false; no_buffers: if (rx_ring->next_to_use != ntu) i40e_release_rx_desc(rx_ring, ntu); /* make sure to come back via polling to try again after * allocation failure */ return true; } /** * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum * @vsi: the VSI we care about * @skb: skb currently being received and modified * @rx_desc: the receive descriptor **/ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct sk_buff *skb, union i40e_rx_desc *rx_desc) { struct i40e_rx_ptype_decoded decoded; u32 rx_error, rx_status; bool ipv4, ipv6; u8 ptype; u64 qword; qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT; rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; decoded = decode_rx_desc_ptype(ptype); skb->ip_summed = CHECKSUM_NONE; skb_checksum_none_assert(skb); /* Rx csum enabled and ip headers found? */ if (!(vsi->netdev->features & NETIF_F_RXCSUM)) return; /* did the hardware decode the packet and checksum? 
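 * The L3L4P status bit tested just below is only set when it did.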
*/ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) return; /* both known and outer_ip must be set for the below code to work */ if (!(decoded.known && decoded.outer_ip)) return; ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); if (ipv4 && (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT)))) goto checksum_fail; /* likely incorrect csum if alternate IP extension headers found */ if (ipv6 && rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) /* don't increment checksum err here, non-fatal err */ return; /* there was some L4 error, count error and punt packet to the stack */ if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) goto checksum_fail; /* handle packets that were not able to be checksummed due * to arrival speed, in this case the stack can compute * the csum. */ if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) return; /* If there is an outer header present that might contain a checksum * we need to bump the checksum level by 1 to reflect the fact that * we are indicating we validated the inner checksum. */ if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT) skb->csum_level = 1; /* Only report checksum unnecessary for TCP, UDP, or SCTP */ switch (decoded.inner_prot) { case I40E_RX_PTYPE_INNER_PROT_TCP: case I40E_RX_PTYPE_INNER_PROT_UDP: case I40E_RX_PTYPE_INNER_PROT_SCTP: skb->ip_summed = CHECKSUM_UNNECESSARY; fallthrough; default: break; } return; checksum_fail: vsi->back->hw_csum_rx_error++; } /** * i40e_ptype_to_htype - get a hash type * @ptype: the ptype value from the descriptor * * Returns a hash type to be used by skb_set_hash **/ static inline int i40e_ptype_to_htype(u8 ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); if (!decoded.known) return PKT_HASH_TYPE_NONE; if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4) return PKT_HASH_TYPE_L4; else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3) return PKT_HASH_TYPE_L3; else return PKT_HASH_TYPE_L2; } /** * i40e_rx_hash - set the hash value in the skb * @ring: descriptor ring * @rx_desc: specific descriptor * @skb: skb currently being received and modified * @rx_ptype: Rx packet type **/ static inline void i40e_rx_hash(struct i40e_ring *ring, union i40e_rx_desc *rx_desc, struct sk_buff *skb, u8 rx_ptype) { u32 hash; const __le64 rss_mask = cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); if (!(ring->netdev->features & NETIF_F_RXHASH)) return; if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype)); } } /** * i40e_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated * * This function checks the ring, descriptor, and packet information in * order to populate the hash, checksum, VLAN, protocol, and * other fields within the skb. 
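 *
 * This is also the point where a hardware Rx timestamp (when TSYNVALID
 * is set) is handed to the PTP code and where an L2TAG1 VLAN tag is
 * pushed into the skb before eth_type_trans() consumes the Ethernet
 * header.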
**/ void i40e_process_skb_fields(struct i40e_ring *rx_ring, union i40e_rx_desc *rx_desc, struct sk_buff *skb) { u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK; u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT; u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; if (unlikely(tsynvalid)) i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn); i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); skb_record_rx_queue(skb, rx_ring->queue_index); if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) { __le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1; __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), le16_to_cpu(vlan_tag)); } /* modifies the skb - consumes the enet header */ skb->protocol = eth_type_trans(skb, rx_ring->netdev); } /** * i40e_cleanup_headers - Correct empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @skb: pointer to current skb being fixed * @rx_desc: pointer to the EOP Rx descriptor * * In addition if skb is not at least 60 bytes we need to pad it so that * it is large enough to qualify as a valid Ethernet frame. * * Returns true if an error was encountered and skb was freed. **/ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb, union i40e_rx_desc *rx_desc) { /* ERR_MASK will only have valid bits if EOP set, and * what we are doing here is actually checking * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in * the error field */ if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { dev_kfree_skb_any(skb); return true; } /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; return false; } /** * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx * @rx_buffer: buffer containing the page * @rx_stats: rx stats structure for the rx ring * * If page is reusable, we have a green light for calling i40e_reuse_rx_page, * which will assign the current buffer to the buffer that next_to_alloc is * pointing to; otherwise, the DMA mapping needs to be destroyed and * page freed. * * rx_stats will be updated to indicate whether the page was waived * or busy if it could not be reused. */ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, struct i40e_rx_queue_stats *rx_stats) { unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; struct page *page = rx_buffer->page; /* Is any reuse possible? */ if (!dev_page_is_reusable(page)) { rx_stats->page_waive_count++; return false; } #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ if (unlikely((rx_buffer->page_count - pagecnt_bias) > 1)) { rx_stats->page_busy_count++; return false; } #else #define I40E_LAST_OFFSET \ (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048) if (rx_buffer->page_offset > I40E_LAST_OFFSET) { rx_stats->page_busy_count++; return false; } #endif /* If we have drained the page fragment pool we need to update * the pagecnt_bias and page count so that we fully restock the * number of references the driver holds. 
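 *
 * In other words, a bias of 1 means the driver is down to its last
 * reference on the page, so it re-adds USHRT_MAX - 1 references in one
 * shot rather than taking one atomic reference per received frame.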
*/ if (unlikely(pagecnt_bias == 1)) { page_ref_add(page, USHRT_MAX - 1); rx_buffer->pagecnt_bias = USHRT_MAX; } return true; } /** * i40e_rx_buffer_flip - adjusted rx_buffer to point to an unused region * @rx_buffer: Rx buffer to adjust * @truesize: Size of adjustment **/ static void i40e_rx_buffer_flip(struct i40e_rx_buffer *rx_buffer, unsigned int truesize) { #if (PAGE_SIZE < 8192) rx_buffer->page_offset ^= truesize; #else rx_buffer->page_offset += truesize; #endif } /** * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use * @rx_ring: rx descriptor ring to transact packets on * @size: size of buffer to add to skb * * This function will pull an Rx buffer from the ring and synchronize it * for use by the CPU. */ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, const unsigned int size) { struct i40e_rx_buffer *rx_buffer; rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_process); rx_buffer->page_count = #if (PAGE_SIZE < 8192) page_count(rx_buffer->page); #else 0; #endif prefetch_page_address(rx_buffer->page); /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, size, DMA_FROM_DEVICE); /* We have pulled a buffer for use, so decrement pagecnt_bias */ rx_buffer->pagecnt_bias--; return rx_buffer; } /** * i40e_put_rx_buffer - Clean up used buffer and either recycle or free * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: rx buffer to pull data from * * This function will clean up the contents of the rx_buffer. It will * either recycle the buffer or unmap it and free the associated resources. */ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer) { if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats)) { /* hand second half of page back to the ring */ i40e_reuse_rx_page(rx_ring, rx_buffer); } else { /* we are not reusing the buffer so unmap it */ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, i40e_rx_pg_size(rx_ring), DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); /* clear contents of buffer_info */ rx_buffer->page = NULL; } } /** * i40e_process_rx_buffs- Processing of buffers post XDP prog or on error * @rx_ring: Rx descriptor ring to transact packets on * @xdp_res: Result of the XDP program * @xdp: xdp_buff pointing to the data **/ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res, struct xdp_buff *xdp) { u32 next = rx_ring->next_to_clean; struct i40e_rx_buffer *rx_buffer; xdp->flags = 0; while (1) { rx_buffer = i40e_rx_bi(rx_ring, next); if (++next == rx_ring->count) next = 0; if (!rx_buffer->page) continue; if (xdp_res == I40E_XDP_CONSUMED) rx_buffer->pagecnt_bias++; else i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); /* EOP buffer will be put in i40e_clean_rx_irq() */ if (next == rx_ring->next_to_process) return; i40e_put_rx_buffer(rx_ring, rx_buffer); } } /** * i40e_construct_skb - Allocate skb and populate it * @rx_ring: rx descriptor ring to transact packets on * @xdp: xdp_buff pointing to the data * @nr_frags: number of buffers for the packet * * This function allocates an skb. It then populates it with the page * data from the current receive descriptor, taking care to set up the * skb correctly. 
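 *
 * Roughly: up to I40E_RX_HDR_SIZE bytes of headers are copied into the
 * skb's linear area (eth_get_headlen() picks the exact pull length) and
 * any remaining payload stays on the page, attached as a fragment, so
 * the bulk of the data is never memcpy'd.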
*/ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, struct xdp_buff *xdp, u32 nr_frags) { unsigned int size = xdp->data_end - xdp->data; struct i40e_rx_buffer *rx_buffer; unsigned int headlen; struct sk_buff *skb; /* prefetch first cache line of first page */ net_prefetch(xdp->data); /* Note, we get here by enabling legacy-rx via: * * ethtool --set-priv-flags <dev> legacy-rx on * * In this mode, we currently get 0 extra XDP headroom as * opposed to having legacy-rx off, where we process XDP * packets going to stack via i40e_build_skb(). The latter * provides us currently with 192 bytes of headroom. * * For i40e_construct_skb() mode it means that the * xdp->data_meta will always point to xdp->data, since * the helper cannot expand the head. Should this ever * change in future for legacy-rx mode on, then lets also * add xdp->data_meta handling here. */ /* allocate a skb to store the frags */ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, I40E_RX_HDR_SIZE, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) return NULL; /* Determine available headroom for copy */ headlen = size; if (headlen > I40E_RX_HDR_SIZE) headlen = eth_get_headlen(skb->dev, xdp->data, I40E_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long))); rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); /* update all of the pointers */ size -= headlen; if (size) { if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { dev_kfree_skb(skb); return NULL; } skb_add_rx_frag(skb, 0, rx_buffer->page, rx_buffer->page_offset + headlen, size, xdp->frame_sz); /* buffer is used by skb, update page_offset */ i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); } else { /* buffer is unused, reset bias back to rx_buffer */ rx_buffer->pagecnt_bias++; } if (unlikely(xdp_buff_has_frags(xdp))) { struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb); sinfo = xdp_get_shared_info_from_buff(xdp); memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], sizeof(skb_frag_t) * nr_frags); xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags, sinfo->xdp_frags_size, nr_frags * xdp->frame_sz, xdp_buff_is_frag_pfmemalloc(xdp)); /* First buffer has already been processed, so bump ntc */ if (++rx_ring->next_to_clean == rx_ring->count) rx_ring->next_to_clean = 0; i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp); } return skb; } /** * i40e_build_skb - Build skb around an existing buffer * @rx_ring: Rx descriptor ring to transact packets on * @xdp: xdp_buff pointing to the data * @nr_frags: number of buffers for the packet * * This function builds an skb around an existing Rx buffer, taking care * to set up the skb correctly and avoid any memcpy overhead. */ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, struct xdp_buff *xdp, u32 nr_frags) { unsigned int metasize = xdp->data - xdp->data_meta; struct sk_buff *skb; /* Prefetch first cache line of first page. If xdp->data_meta * is unused, this points exactly as xdp->data, otherwise we * likely have a consumer accessing first few bytes of meta * data, and then actual data. 
*/ net_prefetch(xdp->data_meta); /* build an skb around the page buffer */ skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz); if (unlikely(!skb)) return NULL; /* update pointers within the skb to store the data */ skb_reserve(skb, xdp->data - xdp->data_hard_start); __skb_put(skb, xdp->data_end - xdp->data); if (metasize) skb_metadata_set(skb, metasize); if (unlikely(xdp_buff_has_frags(xdp))) { struct skb_shared_info *sinfo; sinfo = xdp_get_shared_info_from_buff(xdp); xdp_update_skb_shared_info(skb, nr_frags, sinfo->xdp_frags_size, nr_frags * xdp->frame_sz, xdp_buff_is_frag_pfmemalloc(xdp)); i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp); } else { struct i40e_rx_buffer *rx_buffer; rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); /* buffer is used by skb, update page_offset */ i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); } return skb; } /** * i40e_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed * @rx_desc: Rx descriptor for current buffer * * If the buffer is an EOP buffer, this function exits returning false, * otherwise return true indicating that this is in fact a non-EOP buffer. */ bool i40e_is_non_eop(struct i40e_ring *rx_ring, union i40e_rx_desc *rx_desc) { /* if we are the last buffer then there is nothing else to do */ #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT) if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) return false; rx_ring->rx_stats.non_eop_descs++; return true; } static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, struct i40e_ring *xdp_ring); int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring) { struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) return I40E_XDP_CONSUMED; return i40e_xmit_xdp_ring(xdpf, xdp_ring); } /** * i40e_run_xdp - run an XDP program * @rx_ring: Rx ring being processed * @xdp: XDP buffer containing the frame * @xdp_prog: XDP program to run **/ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) { int err, result = I40E_XDP_PASS; struct i40e_ring *xdp_ring; u32 act; if (!xdp_prog) goto xdp_out; prefetchw(xdp->data_hard_start); /* xdp_frame write */ act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: break; case XDP_TX: xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); if (result == I40E_XDP_CONSUMED) goto out_failure; break; case XDP_REDIRECT: err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); if (err) goto out_failure; result = I40E_XDP_REDIR; break; default: bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); fallthrough; /* handle aborts by dropping packet */ case XDP_DROP: result = I40E_XDP_CONSUMED; break; } xdp_out: return result; } /** * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register * @xdp_ring: XDP Tx ring * * This function updates the XDP Tx ring tail register. **/ void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) { /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. */ wmb(); writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); } /** * i40e_update_rx_stats - Update Rx ring statistics * @rx_ring: rx descriptor ring * @total_rx_bytes: number of bytes received * @total_rx_packets: number of packets received * * This function updates the Rx ring statistics. 
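 *
 * The ring counters are updated inside a u64_stats sequence so 64-bit
 * reads from the stats path stay consistent on 32-bit systems, and the
 * per-vector totals accumulated here feed the adaptive ITR logic in
 * i40e_update_itr().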
**/ void i40e_update_rx_stats(struct i40e_ring *rx_ring, unsigned int total_rx_bytes, unsigned int total_rx_packets) { u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; u64_stats_update_end(&rx_ring->syncp); rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; } /** * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map * @rx_ring: Rx ring * @xdp_res: Result of the receive batch * * This function bumps XDP Tx tail and/or flush redirect map, and * should be called when a batch of packets has been processed in the * napi loop. **/ void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res) { if (xdp_res & I40E_XDP_REDIR) xdp_do_flush_map(); if (xdp_res & I40E_XDP_TX) { struct i40e_ring *xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; i40e_xdp_ring_update_tail(xdp_ring); } } /** * i40e_inc_ntp: Advance the next_to_process index * @rx_ring: Rx ring **/ static void i40e_inc_ntp(struct i40e_ring *rx_ring) { u32 ntp = rx_ring->next_to_process + 1; ntp = (ntp < rx_ring->count) ? ntp : 0; rx_ring->next_to_process = ntp; prefetch(I40E_RX_DESC(rx_ring, ntp)); } /** * i40e_add_xdp_frag: Add a frag to xdp_buff * @xdp: xdp_buff pointing to the data * @nr_frags: return number of buffers for the packet * @rx_buffer: rx_buffer holding data of the current frag * @size: size of data of current frag */ static int i40e_add_xdp_frag(struct xdp_buff *xdp, u32 *nr_frags, struct i40e_rx_buffer *rx_buffer, u32 size) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); if (!xdp_buff_has_frags(xdp)) { sinfo->nr_frags = 0; sinfo->xdp_frags_size = 0; xdp_buff_set_frags_flag(xdp); } else if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) { /* Overflowing packet: All frags need to be dropped */ return -ENOMEM; } __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buffer->page, rx_buffer->page_offset, size); sinfo->xdp_frags_size += size; if (page_is_pfmemalloc(rx_buffer->page)) xdp_buff_set_frag_pfmemalloc(xdp); *nr_frags = sinfo->nr_frags; return 0; } /** * i40e_consume_xdp_buff - Consume all the buffers of the packet and update ntc * @rx_ring: rx descriptor ring to transact packets on * @xdp: xdp_buff pointing to the data * @rx_buffer: rx_buffer of eop desc */ static void i40e_consume_xdp_buff(struct i40e_ring *rx_ring, struct xdp_buff *xdp, struct i40e_rx_buffer *rx_buffer) { i40e_process_rx_buffs(rx_ring, I40E_XDP_CONSUMED, xdp); i40e_put_rx_buffer(rx_ring, rx_buffer); rx_ring->next_to_clean = rx_ring->next_to_process; xdp->data = NULL; } /** * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process * @rx_cleaned: Out parameter of the number of packets processed * * This function provides a "bounce buffer" approach to Rx interrupt * processing. The advantage to this is that on systems that have * expensive overhead for IOMMU access this provides a means of avoiding * it by maintaining the mapping of the page to the system. 
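 *
 * Per descriptor the loop below reads qword1 under a dma_rmb() so the
 * write-back is known to be complete, hands programming-status
 * descriptors to i40e_clean_programming_status(), runs the attached XDP
 * program if one is loaded, and only builds an skb for frames that are
 * passed up the stack.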
* * Returns amount of work completed **/ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget, unsigned int *rx_cleaned) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); u16 clean_threshold = rx_ring->count / 2; unsigned int offset = rx_ring->rx_offset; struct xdp_buff *xdp = &rx_ring->xdp; unsigned int xdp_xmit = 0; struct bpf_prog *xdp_prog; bool failure = false; int xdp_res = 0; xdp_prog = READ_ONCE(rx_ring->xdp_prog); while (likely(total_rx_packets < (unsigned int)budget)) { u16 ntp = rx_ring->next_to_process; struct i40e_rx_buffer *rx_buffer; union i40e_rx_desc *rx_desc; struct sk_buff *skb; unsigned int size; u32 nfrags = 0; bool neop; u64 qword; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= clean_threshold) { failure = failure || i40e_alloc_rx_buffers(rx_ring, cleaned_count); cleaned_count = 0; } rx_desc = I40E_RX_DESC(rx_ring, ntp); /* status_error_len will always be zero for unused descriptors * because it's cleared in cleanup, and overlaps with hdr_addr * which is always zero because packet split isn't used, if the * hardware wrote DD then the length will be non-zero */ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); /* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc until we have * verified the descriptor has been written back. */ dma_rmb(); if (i40e_rx_is_programming_status(qword)) { i40e_clean_programming_status(rx_ring, rx_desc->raw.qword[0], qword); rx_buffer = i40e_rx_bi(rx_ring, ntp); i40e_inc_ntp(rx_ring); i40e_reuse_rx_page(rx_ring, rx_buffer); cleaned_count++; continue; } size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; if (!size) break; i40e_trace(clean_rx_irq, rx_ring, rx_desc, xdp); /* retrieve a buffer from the ring */ rx_buffer = i40e_get_rx_buffer(rx_ring, size); neop = i40e_is_non_eop(rx_ring, rx_desc); i40e_inc_ntp(rx_ring); if (!xdp->data) { unsigned char *hard_start; hard_start = page_address(rx_buffer->page) + rx_buffer->page_offset - offset; xdp_prepare_buff(xdp, hard_start, offset, size, true); #if (PAGE_SIZE > 4096) /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp->frame_sz = i40e_rx_frame_truesize(rx_ring, size); #endif } else if (i40e_add_xdp_frag(xdp, &nfrags, rx_buffer, size) && !neop) { /* Overflowing packet: Drop all frags on EOP */ i40e_consume_xdp_buff(rx_ring, xdp, rx_buffer); break; } if (neop) continue; xdp_res = i40e_run_xdp(rx_ring, xdp, xdp_prog); if (xdp_res) { xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR); if (unlikely(xdp_buff_has_frags(xdp))) { i40e_process_rx_buffs(rx_ring, xdp_res, xdp); size = xdp_get_buff_len(xdp); } else if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) { i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); } else { rx_buffer->pagecnt_bias++; } total_rx_bytes += size; } else { if (ring_uses_build_skb(rx_ring)) skb = i40e_build_skb(rx_ring, xdp, nfrags); else skb = i40e_construct_skb(rx_ring, xdp, nfrags); /* drop if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; i40e_consume_xdp_buff(rx_ring, xdp, rx_buffer); break; } if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) goto process_next; /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; /* populate checksum, VLAN, and protocol */ i40e_process_skb_fields(rx_ring, rx_desc, skb); i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, xdp); napi_gro_receive(&rx_ring->q_vector->napi, skb); } /* update budget accounting 
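 * (each frame handed to the stack or consumed by XDP counts against the NAPI budget)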
*/ total_rx_packets++; process_next: cleaned_count += nfrags + 1; i40e_put_rx_buffer(rx_ring, rx_buffer); rx_ring->next_to_clean = rx_ring->next_to_process; xdp->data = NULL; } i40e_finalize_xdp_rx(rx_ring, xdp_xmit); i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); *rx_cleaned = total_rx_packets; /* guarantee a trip back through this routine if there was a failure */ return failure ? budget : (int)total_rx_packets; } static inline u32 i40e_buildreg_itr(const int type, u16 itr) { u32 val; /* We don't bother with setting the CLEARPBA bit as the data sheet * points out doing so is "meaningless since it was already * auto-cleared". The auto-clearing happens when the interrupt is * asserted. * * Hardware errata 28 for also indicates that writing to a * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear * an event in the PBA anyway so we need to rely on the automask * to hold pending events for us until the interrupt is re-enabled * * The itr value is reported in microseconds, and the register * value is recorded in 2 microsecond units. For this reason we * only need to shift by the interval shift - 1 instead of the * full value. */ itr &= I40E_ITR_MASK; val = I40E_PFINT_DYN_CTLN_INTENA_MASK | (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1)); return val; } /* a small macro to shorten up some long lines */ #define INTREG I40E_PFINT_DYN_CTLN /* The act of updating the ITR will cause it to immediately trigger. In order * to prevent this from throwing off adaptive update statistics we defer the * update so that it can only happen so often. So after either Tx or Rx are * updated we make the adaptive scheme wait until either the ITR completely * expires via the next_update expiration or we have been through at least * 3 interrupts. */ #define ITR_COUNTDOWN_START 3 /** * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt * @vsi: the VSI we care about * @q_vector: q_vector for which itr is being updated and interrupt enabled * **/ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { struct i40e_hw *hw = &vsi->back->hw; u32 intval; /* If we don't have MSIX, then we only need to re-enable icr0 */ if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) { i40e_irq_dynamic_enable_icr0(vsi->back); return; } /* These will do nothing if dynamic updates are not enabled */ i40e_update_itr(q_vector, &q_vector->tx); i40e_update_itr(q_vector, &q_vector->rx); /* This block of logic allows us to get away with only updating * one ITR value with each interrupt. The idea is to perform a * pseudo-lazy update with the following criteria. * * 1. Rx is given higher priority than Tx if both are in same state * 2. If we must reduce an ITR that is given highest priority. * 3. We then give priority to increasing ITR based on amount. 
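 * 4. If neither ITR needs to change, program ITR_NONE and only decrement the adaptive countdown.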
*/ if (q_vector->rx.target_itr < q_vector->rx.current_itr) { /* Rx ITR needs to be reduced, this is highest priority */ intval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.target_itr); q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || ((q_vector->rx.target_itr - q_vector->rx.current_itr) < (q_vector->tx.target_itr - q_vector->tx.current_itr))) { /* Tx ITR needs to be reduced, this is second priority * Tx ITR needs to be increased more than Rx, fourth priority */ intval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.target_itr); q_vector->tx.current_itr = q_vector->tx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { /* Rx ITR needs to be increased, third priority */ intval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.target_itr); q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; } else { /* No ITR update, lowest priority */ intval = i40e_buildreg_itr(I40E_ITR_NONE, 0); if (q_vector->itr_countdown) q_vector->itr_countdown--; } if (!test_bit(__I40E_VSI_DOWN, vsi->state)) wr32(hw, INTREG(q_vector->reg_idx), intval); } /** * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine * @napi: napi struct with our devices info in it * @budget: amount of work driver is allowed to do this pass, in packets * * This function will clean all queues associated with a q_vector. * * Returns the amount of work done **/ int i40e_napi_poll(struct napi_struct *napi, int budget) { struct i40e_q_vector *q_vector = container_of(napi, struct i40e_q_vector, napi); struct i40e_vsi *vsi = q_vector->vsi; struct i40e_ring *ring; bool tx_clean_complete = true; bool rx_clean_complete = true; unsigned int tx_cleaned = 0; unsigned int rx_cleaned = 0; bool clean_complete = true; bool arm_wb = false; int budget_per_ring; int work_done = 0; if (test_bit(__I40E_VSI_DOWN, vsi->state)) { napi_complete(napi); return 0; } /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ i40e_for_each_ring(ring, q_vector->tx) { bool wd = ring->xsk_pool ? i40e_clean_xdp_tx_irq(vsi, ring) : i40e_clean_tx_irq(vsi, ring, budget, &tx_cleaned); if (!wd) { clean_complete = tx_clean_complete = false; continue; } arm_wb |= ring->arm_wb; ring->arm_wb = false; } /* Handle case where we are called by netpoll with a budget of 0 */ if (budget <= 0) goto tx_only; /* normally we have 1 Rx ring per q_vector */ if (unlikely(q_vector->num_ringpairs > 1)) /* We attempt to distribute budget to each Rx queue fairly, but * don't allow the budget to go below 1 because that would exit * polling early. */ budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1); else /* Max of 1 Rx ring in this q_vector so give it the budget */ budget_per_ring = budget; i40e_for_each_ring(ring, q_vector->rx) { int cleaned = ring->xsk_pool ? 
i40e_clean_rx_irq_zc(ring, budget_per_ring) : i40e_clean_rx_irq(ring, budget_per_ring, &rx_cleaned); work_done += cleaned; /* if we clean as many as budgeted, we must not be done */ if (cleaned >= budget_per_ring) clean_complete = rx_clean_complete = false; } if (!i40e_enabled_xdp_vsi(vsi)) trace_i40e_napi_poll(napi, q_vector, budget, budget_per_ring, rx_cleaned, tx_cleaned, rx_clean_complete, tx_clean_complete); /* If work not completed, return budget and polling will return */ if (!clean_complete) { int cpu_id = smp_processor_id(); /* It is possible that the interrupt affinity has changed but, * if the cpu is pegged at 100%, polling will never exit while * traffic continues and the interrupt will be stuck on this * cpu. We check to make sure affinity is correct before we * continue to poll, otherwise we must stop polling so the * interrupt can move to the correct cpu. */ if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { /* Tell napi that we are done polling */ napi_complete_done(napi, work_done); /* Force an interrupt */ i40e_force_wb(vsi, q_vector); /* Return budget-1 so that polling stops */ return budget - 1; } tx_only: if (arm_wb) { q_vector->tx.ring[0].tx_stats.tx_force_wb++; i40e_enable_wb_on_itr(vsi, q_vector); } return budget; } if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) q_vector->arm_wb_state = false; /* Exit the polling mode, but don't re-enable interrupts if stack might * poll us due to busy-polling */ if (likely(napi_complete_done(napi, work_done))) i40e_update_enable_itr(vsi, q_vector); return min(work_done, budget - 1); } /** * i40e_atr - Add a Flow Director ATR filter * @tx_ring: ring to add programming descriptor to * @skb: send buffer * @tx_flags: send tx flags **/ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, u32 tx_flags) { struct i40e_filter_program_desc *fdir_desc; struct i40e_pf *pf = tx_ring->vsi->back; union { unsigned char *network; struct iphdr *ipv4; struct ipv6hdr *ipv6; } hdr; struct tcphdr *th; unsigned int hlen; u32 flex_ptype, dtype_cmd; int l4_proto; u16 i; /* make sure ATR is enabled */ if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) return; if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) return; /* if sampling is disabled do nothing */ if (!tx_ring->atr_sample_rate) return; /* Currently only IPv4/IPv6 with TCP is supported */ if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6))) return; /* snag network header to get L4 type and address */ hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ? skb_inner_network_header(skb) : skb_network_header(skb); /* Note: tx_flags gets modified to reflect inner protocols in * tx_enable_csum function if encap is enabled. 
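 * For tunneled traffic this means ATR keys on the inner IP and TCP headers.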
*/ if (tx_flags & I40E_TX_FLAGS_IPV4) { /* access ihl as u8 to avoid unaligned access on ia64 */ hlen = (hdr.network[0] & 0x0F) << 2; l4_proto = hdr.ipv4->protocol; } else { /* find the start of the innermost ipv6 header */ unsigned int inner_hlen = hdr.network - skb->data; unsigned int h_offset = inner_hlen; /* this function updates h_offset to the end of the header */ l4_proto = ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL); /* hlen will contain our best estimate of the tcp header */ hlen = h_offset - inner_hlen; } if (l4_proto != IPPROTO_TCP) return; th = (struct tcphdr *)(hdr.network + hlen); /* Due to lack of space, no more new filters can be programmed */ if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) return; if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) { /* HW ATR eviction will take care of removing filters on FIN * and RST packets. */ if (th->fin || th->rst) return; } tx_ring->atr_count++; /* sample on all syn/fin/rst packets or once every atr sample rate */ if (!th->fin && !th->syn && !th->rst && (tx_ring->atr_count < tx_ring->atr_sample_rate)) return; tx_ring->atr_count = 0; /* grab the next descriptor */ i = tx_ring->next_to_use; fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); i++; tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & I40E_TXD_FLTR_QW0_QINDEX_MASK; flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ? (I40E_FILTER_PCTYPE_NONF_IPV4_TCP << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) : (I40E_FILTER_PCTYPE_NONF_IPV6_TCP << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG; dtype_cmd |= (th->fin || th->rst) ? (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << I40E_TXD_FLTR_QW1_PCMD_SHIFT) : (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << I40E_TXD_FLTR_QW1_PCMD_SHIFT); dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX << I40E_TXD_FLTR_QW1_DEST_SHIFT; dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) dtype_cmd |= ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & I40E_TXD_FLTR_QW1_CNTINDEX_MASK; else dtype_cmd |= ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & I40E_TXD_FLTR_QW1_CNTINDEX_MASK; if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); fdir_desc->rsvd = cpu_to_le32(0); fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); fdir_desc->fd_id = cpu_to_le32(0); } /** * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW * @skb: send buffer * @tx_ring: ring to send buffer on * @flags: the tx flags to be set * * Checks the skb and set up correspondingly several generic transmit flags * related to VLAN tagging for the HW, such as VLAN, DCB, etc. * * Returns error code indicate the frame should be dropped upon error and the * otherwise returns 0 to indicate the flags has been set properly. 
**/ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, struct i40e_ring *tx_ring, u32 *flags) { __be16 protocol = skb->protocol; u32 tx_flags = 0; if (protocol == htons(ETH_P_8021Q) && !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { /* When HW VLAN acceleration is turned off by the user the * stack sets the protocol to 8021q so that the driver * can take any steps required to support the SW only * VLAN handling. In our case the driver doesn't need * to take any further steps so just set the protocol * to the encapsulated ethertype. */ skb->protocol = vlan_get_protocol(skb); goto out; } /* if we have a HW VLAN tag being added, default to the HW one */ if (skb_vlan_tag_present(skb)) { tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; tx_flags |= I40E_TX_FLAGS_HW_VLAN; /* else if it is a SW VLAN, check the next protocol and store the tag */ } else if (protocol == htons(ETH_P_8021Q)) { struct vlan_hdr *vhdr, _vhdr; vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); if (!vhdr) return -EINVAL; protocol = vhdr->h_vlan_encapsulated_proto; tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; tx_flags |= I40E_TX_FLAGS_SW_VLAN; } if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) goto out; /* Insert 802.1p priority into VLAN header */ if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) || (skb->priority != TC_PRIO_CONTROL)) { tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK; tx_flags |= (skb->priority & 0x7) << I40E_TX_FLAGS_VLAN_PRIO_SHIFT; if (tx_flags & I40E_TX_FLAGS_SW_VLAN) { struct vlan_ethhdr *vhdr; int rc; rc = skb_cow_head(skb, 0); if (rc < 0) return rc; vhdr = skb_vlan_eth_hdr(skb); vhdr->h_vlan_TCI = htons(tx_flags >> I40E_TX_FLAGS_VLAN_SHIFT); } else { tx_flags |= I40E_TX_FLAGS_HW_VLAN; } } out: *flags = tx_flags; return 0; } /** * i40e_tso - set up the tso context descriptor * @first: pointer to first Tx buffer for xmit * @hdr_len: ptr to the size of the packet header * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { struct sk_buff *skb = first->skb; u64 cd_cmd, cd_tso_len, cd_mss; __be16 protocol; union { struct iphdr *v4; struct ipv6hdr *v6; unsigned char *hdr; } ip; union { struct tcphdr *tcp; struct udphdr *udp; unsigned char *hdr; } l4; u32 paylen, l4_offset; u16 gso_size; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; if (!skb_is_gso(skb)) return 0; err = skb_cow_head(skb, 0); if (err < 0) return err; protocol = vlan_get_protocol(skb); if (eth_p_mpls(protocol)) ip.hdr = skb_inner_network_header(skb); else ip.hdr = skb_network_header(skb); l4.hdr = skb_checksum_start(skb); /* initialize outer IP header fields */ if (ip.v4->version == 4) { ip.v4->tot_len = 0; ip.v4->check = 0; first->tx_flags |= I40E_TX_FLAGS_TSO; } else { ip.v6->payload_len = 0; first->tx_flags |= I40E_TX_FLAGS_TSO; } if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | SKB_GSO_GRE_CSUM | SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6 | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { l4.udp->len = 0; /* determine offset of outer transport header */ l4_offset = l4.hdr - skb->data; /* remove payload length from outer checksum */ paylen = skb->len - l4_offset; csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen)); } /* reset pointers to inner headers */ ip.hdr = 
skb_inner_network_header(skb); l4.hdr = skb_inner_transport_header(skb); /* initialize inner IP header fields */ if (ip.v4->version == 4) { ip.v4->tot_len = 0; ip.v4->check = 0; } else { ip.v6->payload_len = 0; } } /* determine offset of inner transport header */ l4_offset = l4.hdr - skb->data; /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen)); /* compute length of segmentation header */ *hdr_len = sizeof(*l4.udp) + l4_offset; } else { csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; } /* pull values out of skb_shinfo */ gso_size = skb_shinfo(skb)->gso_size; /* update GSO size and bytecount with header size */ first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; /* find the field values */ cd_cmd = I40E_TX_CTX_DESC_TSO; cd_tso_len = skb->len - *hdr_len; cd_mss = gso_size; *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); return 1; } /** * i40e_tsyn - set up the tsyn context descriptor * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @tx_flags: the collected send information * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen **/ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, u64 *cd_type_cmd_tso_mss) { struct i40e_pf *pf; if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) return 0; /* Tx timestamps cannot be sampled when doing TSO */ if (tx_flags & I40E_TX_FLAGS_TSO) return 0; /* only timestamp the outbound packet if the user has requested it and * we are not already transmitting a packet to be timestamped */ pf = i40e_netdev_to_pf(tx_ring->netdev); if (!(pf->flags & I40E_FLAG_PTP)) return 0; if (pf->ptp_tx && !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; pf->ptp_tx_start = jiffies; pf->ptp_tx_skb = skb_get(skb); } else { pf->tx_hwtstamp_skipped++; return 0; } *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN << I40E_TXD_CTX_QW1_CMD_SHIFT; return 1; } /** * i40e_tx_enable_csum - Enable Tx checksum offloads * @skb: send buffer * @tx_flags: pointer to Tx flags currently set * @td_cmd: Tx descriptor command bits to set * @td_offset: Tx descriptor header offsets to set * @tx_ring: Tx descriptor ring * @cd_tunneling: ptr to context desc bits **/ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, u32 *td_cmd, u32 *td_offset, struct i40e_ring *tx_ring, u32 *cd_tunneling) { union { struct iphdr *v4; struct ipv6hdr *v6; unsigned char *hdr; } ip; union { struct tcphdr *tcp; struct udphdr *udp; unsigned char *hdr; } l4; unsigned char *exthdr; u32 offset, cmd = 0; __be16 frag_off; __be16 protocol; u8 l4_proto = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; protocol = vlan_get_protocol(skb); if (eth_p_mpls(protocol)) { ip.hdr = skb_inner_network_header(skb); l4.hdr = skb_checksum_start(skb); } else { ip.hdr = skb_network_header(skb); l4.hdr = skb_transport_header(skb); } /* set the tx_flags to indicate the IP protocol type. this is * required so that checksum header computation below is accurate. 
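 * For encapsulated frames these flags are cleared and re-derived from the inner header further down.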
*/ if (ip.v4->version == 4) *tx_flags |= I40E_TX_FLAGS_IPV4; else *tx_flags |= I40E_TX_FLAGS_IPV6; /* compute outer L2 header size */ offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; if (skb->encapsulation) { u32 tunnel = 0; /* define outer network header type */ if (*tx_flags & I40E_TX_FLAGS_IPV4) { tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? I40E_TX_CTX_EXT_IP_IPV4 : I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; l4_proto = ip.v4->protocol; } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { int ret; tunnel |= I40E_TX_CTX_EXT_IP_IPV6; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; ret = ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off); if (ret < 0) return -1; } /* define outer transport */ switch (l4_proto) { case IPPROTO_UDP: tunnel |= I40E_TXD_CTX_UDP_TUNNELING; *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; case IPPROTO_GRE: tunnel |= I40E_TXD_CTX_GRE_TUNNELING; *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; case IPPROTO_IPIP: case IPPROTO_IPV6: *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; l4.hdr = skb_inner_network_header(skb); break; default: if (*tx_flags & I40E_TX_FLAGS_TSO) return -1; skb_checksum_help(skb); return 0; } /* compute outer L3 header size */ tunnel |= ((l4.hdr - ip.hdr) / 4) << I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; /* switch IP header pointer from outer to inner header */ ip.hdr = skb_inner_network_header(skb); /* compute tunnel header size */ tunnel |= ((ip.hdr - l4.hdr) / 2) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; /* indicate if we need to offload outer UDP header */ if ((*tx_flags & I40E_TX_FLAGS_TSO) && !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; /* record tunnel offload values */ *cd_tunneling |= tunnel; /* switch L4 header pointer from outer to inner */ l4.hdr = skb_inner_transport_header(skb); l4_proto = 0; /* reset type as we transition from outer to inner headers */ *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); if (ip.v4->version == 4) *tx_flags |= I40E_TX_FLAGS_IPV4; if (ip.v6->version == 6) *tx_flags |= I40E_TX_FLAGS_IPV6; } /* Enable IP checksum offloads */ if (*tx_flags & I40E_TX_FLAGS_IPV4) { l4_proto = ip.v4->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
I40E_TX_DESC_CMD_IIPT_IPV4_CSUM : I40E_TX_DESC_CMD_IIPT_IPV4; } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; if (l4.hdr != exthdr) ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off); } /* compute inner L3 header size */ offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; /* Enable L4 checksum offloads */ switch (l4_proto) { case IPPROTO_TCP: /* enable checksum offloads */ cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_SCTP: /* enable SCTP checksum offload */ cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; offset |= (sizeof(struct sctphdr) >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_UDP: /* enable UDP checksum offload */ cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; offset |= (sizeof(struct udphdr) >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; default: if (*tx_flags & I40E_TX_FLAGS_TSO) return -1; skb_checksum_help(skb); return 0; } *td_cmd |= cmd; *td_offset |= offset; return 1; } /** * i40e_create_tx_ctx - Build the Tx context descriptor * @tx_ring: ring to create the descriptor on * @cd_type_cmd_tso_mss: Quad Word 1 * @cd_tunneling: Quad Word 0 - bits 0-31 * @cd_l2tag2: Quad Word 0 - bits 32-63 **/ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, const u64 cd_type_cmd_tso_mss, const u32 cd_tunneling, const u32 cd_l2tag2) { struct i40e_tx_context_desc *context_desc; int i = tx_ring->next_to_use; if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && !cd_tunneling && !cd_l2tag2) return; /* grab the next descriptor */ context_desc = I40E_TX_CTXTDESC(tx_ring, i); i++; tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; /* cpu_to_le32 and assign to struct fields */ context_desc->tunneling_params = cpu_to_le32(cd_tunneling); context_desc->l2tag2 = cpu_to_le16(cd_l2tag2); context_desc->rsvd = cpu_to_le16(0); context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); } /** * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions * @tx_ring: the ring to be checked * @size: the size buffer we want to assure is available * * Returns -EBUSY if a stop is needed, else 0 **/ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) { netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); /* Memory barrier before checking head and tail */ smp_mb(); ++tx_ring->tx_stats.tx_stopped; /* Check again in a case another CPU has just made room available. */ if (likely(I40E_DESC_UNUSED(tx_ring) < size)) return -EBUSY; /* A reprieve! - use start_queue because it doesn't call schedule */ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; return 0; } /** * __i40e_chk_linearize - Check if there are more than 8 buffers per packet * @skb: send buffer * * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire * and so we need to figure out the cases where we need to linearize the skb. * * For TSO we need to count the TSO header and segment payload separately. * As such we need to check cases where we have 7 fragments or more as we * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for * the segment payload in the first descriptor, and another 7 for the * fragments. 
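 * For example, gso_size 7000 spread across many 1000-byte fragments must be linearized: any six consecutive fragments cover only 6000 bytes, so a single segment plus its header could need more than eight descriptors.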
**/ bool __i40e_chk_linearize(struct sk_buff *skb) { const skb_frag_t *frag, *stale; int nr_frags, sum; /* no need to check if number of frags is less than 7 */ nr_frags = skb_shinfo(skb)->nr_frags; if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) return false; /* We need to walk through the list and validate that each group * of 6 fragments totals at least gso_size. */ nr_frags -= I40E_MAX_BUFFER_TXD - 2; frag = &skb_shinfo(skb)->frags[0]; /* Initialize size to the negative value of gso_size minus 1. We * use this as the worst case scenerio in which the frag ahead * of us only provides one byte which is why we are limited to 6 * descriptors for a single transmit as the header and previous * fragment are already consuming 2 descriptors. */ sum = 1 - skb_shinfo(skb)->gso_size; /* Add size of frags 0 through 4 to create our initial sum */ sum += skb_frag_size(frag++); sum += skb_frag_size(frag++); sum += skb_frag_size(frag++); sum += skb_frag_size(frag++); sum += skb_frag_size(frag++); /* Walk through fragments adding latest fragment, testing it, and * then removing stale fragments from the sum. */ for (stale = &skb_shinfo(skb)->frags[0];; stale++) { int stale_size = skb_frag_size(stale); sum += skb_frag_size(frag++); /* The stale fragment may present us with a smaller * descriptor than the actual fragment size. To account * for that we need to remove all the data on the front and * figure out what the remainder would be in the last * descriptor associated with the fragment. */ if (stale_size > I40E_MAX_DATA_PER_TXD) { int align_pad = -(skb_frag_off(stale)) & (I40E_MAX_READ_REQ_SIZE - 1); sum -= align_pad; stale_size -= align_pad; do { sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; } while (stale_size > I40E_MAX_DATA_PER_TXD); } /* if sum is negative we failed to make sufficient progress */ if (sum < 0) return true; if (!nr_frags--) break; sum -= stale_size; } return false; } /** * i40e_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on * @skb: send buffer * @first: first buffer info buffer to use * @tx_flags: collected send information * @hdr_len: size of the packet header * @td_cmd: the command field in the descriptor * @td_offset: offset for checksum or crc * * Returns 0 on success, -1 on failure to DMA **/ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, struct i40e_tx_buffer *first, u32 tx_flags, const u8 hdr_len, u32 td_cmd, u32 td_offset) { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); skb_frag_t *frag; struct i40e_tx_buffer *tx_bi; struct i40e_tx_desc *tx_desc; u16 i = tx_ring->next_to_use; u32 td_tag = 0; dma_addr_t dma; u16 desc_count = 1; if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> I40E_TX_FLAGS_VLAN_SHIFT; } first->tx_flags = tx_flags; dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); tx_desc = I40E_TX_DESC(tx_ring, i); tx_bi = first; for (frag = &skb_shinfo(skb)->frags[0];; frag++) { unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; if (dma_mapping_error(tx_ring->dev, dma)) goto dma_error; /* record length, and DMA address */ dma_unmap_len_set(tx_bi, len, size); dma_unmap_addr_set(tx_bi, dma, dma); /* align size to end of page */ max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); tx_desc->buffer_addr = cpu_to_le64(dma); while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, max_data, td_tag); tx_desc++; i++; 
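/* advance to the next descriptor slot, wrapping back to the start of the ring when the end is reached */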
desc_count++; if (i == tx_ring->count) { tx_desc = I40E_TX_DESC(tx_ring, 0); i = 0; } dma += max_data; size -= max_data; max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; tx_desc->buffer_addr = cpu_to_le64(dma); } if (likely(!data_len)) break; tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, td_tag); tx_desc++; i++; desc_count++; if (i == tx_ring->count) { tx_desc = I40E_TX_DESC(tx_ring, 0); i = 0; } size = skb_frag_size(frag); data_len -= size; dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, DMA_TO_DEVICE); tx_bi = &tx_ring->tx_bi[i]; } netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); i++; if (i == tx_ring->count) i = 0; tx_ring->next_to_use = i; i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); /* write last descriptor with EOP bit */ td_cmd |= I40E_TX_DESC_CMD_EOP; /* We OR these values together to check both against 4 (WB_STRIDE) * below. This is safe since we don't re-use desc_count afterwards. */ desc_count |= ++tx_ring->packet_stride; if (desc_count >= WB_STRIDE) { /* write last descriptor with RS bit set */ td_cmd |= I40E_TX_DESC_CMD_RS; tx_ring->packet_stride = 0; } tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, td_tag); skb_tx_timestamp(skb); /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. * * We also use this memory barrier to make certain all of the * status bits have been updated before next_to_watch is written. */ wmb(); /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; /* notify HW of packet */ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); } return 0; dma_error: dev_info(tx_ring->dev, "TX DMA map failed\n"); /* clear dma mappings for failed tx_bi map */ for (;;) { tx_bi = &tx_ring->tx_bi[i]; i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); if (tx_bi == first) break; if (i == 0) i = tx_ring->count; i--; } tx_ring->next_to_use = i; return -1; } static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb, u16 num_tx_queues) { u32 jhash_initval_salt = 0xd631614b; u32 hash; if (skb->sk && skb->sk->sk_hash) hash = skb->sk->sk_hash; else hash = (__force u16)skb->protocol ^ skb->hash; hash = jhash_1word(hash, jhash_initval_salt); return (u16)(((u64)hash * num_tx_queues) >> 32); } u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb, struct net_device __always_unused *sb_dev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_hw *hw; u16 qoffset; u16 qcount; u8 tclass; u16 hash; u8 prio; /* is DCB enabled at all? */ if (vsi->tc_config.numtc == 1 || i40e_is_tc_mqprio_enabled(vsi->back)) return netdev_pick_tx(netdev, skb, sb_dev); prio = skb->priority; hw = &vsi->back->hw; tclass = hw->local_dcbx_config.etscfg.prioritytable[prio]; /* sanity check */ if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass)))) tclass = 0; /* select a queue assigned for the given TC */ qcount = vsi->tc_config.tc_info[tclass].qcount; hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount); qoffset = vsi->tc_config.tc_info[tclass].qoffset; return qoffset + hash; } /** * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring * @xdpf: data to transmit * @xdp_ring: XDP Tx ring **/ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, struct i40e_ring *xdp_ring) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? 
sinfo->nr_frags : 0; u16 i = 0, index = xdp_ring->next_to_use; struct i40e_tx_buffer *tx_head = &xdp_ring->tx_bi[index]; struct i40e_tx_buffer *tx_bi = tx_head; struct i40e_tx_desc *tx_desc = I40E_TX_DESC(xdp_ring, index); void *data = xdpf->data; u32 size = xdpf->len; if (unlikely(I40E_DESC_UNUSED(xdp_ring) < 1 + nr_frags)) { xdp_ring->tx_stats.tx_busy++; return I40E_XDP_CONSUMED; } tx_head->bytecount = xdp_get_frame_len(xdpf); tx_head->gso_segs = 1; tx_head->xdpf = xdpf; for (;;) { dma_addr_t dma; dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE); if (dma_mapping_error(xdp_ring->dev, dma)) goto unmap; /* record length, and DMA address */ dma_unmap_len_set(tx_bi, len, size); dma_unmap_addr_set(tx_bi, dma, dma); tx_desc->buffer_addr = cpu_to_le64(dma); tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC, 0, size, 0); if (++index == xdp_ring->count) index = 0; if (i == nr_frags) break; tx_bi = &xdp_ring->tx_bi[index]; tx_desc = I40E_TX_DESC(xdp_ring, index); data = skb_frag_address(&sinfo->frags[i]); size = skb_frag_size(&sinfo->frags[i]); i++; } tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); /* Make certain all of the status bits have been updated * before next_to_watch is written. */ smp_wmb(); xdp_ring->xdp_tx_active++; tx_head->next_to_watch = tx_desc; xdp_ring->next_to_use = index; return I40E_XDP_TX; unmap: for (;;) { tx_bi = &xdp_ring->tx_bi[index]; if (dma_unmap_len(tx_bi, len)) dma_unmap_page(xdp_ring->dev, dma_unmap_addr(tx_bi, dma), dma_unmap_len(tx_bi, len), DMA_TO_DEVICE); dma_unmap_len_set(tx_bi, len, 0); if (tx_bi == tx_head) break; if (!index) index += xdp_ring->count; index--; } return I40E_XDP_CONSUMED; } /** * i40e_xmit_frame_ring - Sends buffer on Tx ring * @skb: send buffer * @tx_ring: ring to send buffer on * * Returns NETDEV_TX_OK if sent, else an error code **/ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, struct i40e_ring *tx_ring) { u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT; u32 cd_tunneling = 0, cd_l2tag2 = 0; struct i40e_tx_buffer *first; u32 td_offset = 0; u32 tx_flags = 0; u32 td_cmd = 0; u8 hdr_len = 0; int tso, count; int tsyn; /* prefetch the data, we'll need it later */ prefetch(skb->data); i40e_trace(xmit_frame_ring, skb, tx_ring); count = i40e_xmit_descriptor_count(skb); if (i40e_chk_linearize(skb, count)) { if (__skb_linearize(skb)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } count = i40e_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, * + 4 desc gap to avoid the cache line where head is, * + 1 desc for context descriptor, * otherwise try next time */ if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; } /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_bi[tx_ring->next_to_use]; first->skb = skb; first->bytecount = skb->len; first->gso_segs = 1; /* prepare the xmit flags */ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) goto out_drop; tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; else if (tso) tx_flags |= I40E_TX_FLAGS_TSO; /* Always offload the checksum, since it's in the data descriptor */ tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, tx_ring, &cd_tunneling); if (tso < 0) goto out_drop; tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); if (tsyn) tx_flags |= 
I40E_TX_FLAGS_TSYN; /* always enable CRC insertion offload */ td_cmd |= I40E_TX_DESC_CMD_ICRC; i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling, cd_l2tag2); /* Add Flow Director ATR if it's enabled. * * NOTE: this must always be directly before the data descriptor. */ i40e_atr(tx_ring, skb, tx_flags); if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset)) goto cleanup_tx_tstamp; return NETDEV_TX_OK; out_drop: i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); dev_kfree_skb_any(first->skb); first->skb = NULL; cleanup_tx_tstamp: if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) { struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev); dev_kfree_skb_any(pf->ptp_tx_skb); pf->ptp_tx_skb = NULL; clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); } return NETDEV_TX_OK; } /** * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer * @skb: send buffer * @netdev: network interface device structure * * Returns NETDEV_TX_OK if sent, else an error code **/ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; /* hardware can't handle really short frames, hardware padding works * beyond this point */ if (skb_put_padto(skb, I40E_MIN_TX_LEN)) return NETDEV_TX_OK; return i40e_xmit_frame_ring(skb, tx_ring); } /** * i40e_xdp_xmit - Implements ndo_xdp_xmit * @dev: netdev * @n: number of frames * @frames: array of XDP buffer pointers * @flags: XDP extra info * * Returns number of frames successfully sent. Failed frames * will be free'ed by XDP core. * * For error cases, a negative errno code is returned and no-frames * are transmitted (caller must handle freeing frames). **/ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { struct i40e_netdev_priv *np = netdev_priv(dev); unsigned int queue_index = smp_processor_id(); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_ring *xdp_ring; int nxmit = 0; int i; if (test_bit(__I40E_VSI_DOWN, vsi->state)) return -ENETDOWN; if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs || test_bit(__I40E_CONFIG_BUSY, pf->state)) return -ENXIO; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; xdp_ring = vsi->xdp_rings[queue_index]; for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; int err; err = i40e_xmit_xdp_ring(xdpf, xdp_ring); if (err != I40E_XDP_TX) break; nxmit++; } if (unlikely(flags & XDP_XMIT_FLUSH)) i40e_xdp_ring_update_tail(xdp_ring); return nxmit; }
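/* Illustrative sketch (compiled out): the transmit entry points above are
 * wired into the netdev through net_device_ops elsewhere in the driver,
 * roughly as shown below. The structure name and the exact set of fields
 * are assumptions made for illustration only.
 */
#if 0
static const struct net_device_ops i40e_netdev_ops_sketch = {
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_select_queue	= i40e_lan_select_queue,
	.ndo_xdp_xmit		= i40e_xdp_xmit,
};
#endif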
linux-master
drivers/net/ethernet/intel/i40e/i40e_txrx.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2021 Intel Corporation. */ #include "i40e.h" #include "i40e_type.h" #include "i40e_adminq.h" #include "i40e_prototype.h" #include <linux/avf/virtchnl.h> /** * i40e_set_mac_type - Sets MAC type * @hw: pointer to the HW structure * * This function sets the mac type of the adapter based on the * vendor ID and device ID stored in the hw structure. **/ int i40e_set_mac_type(struct i40e_hw *hw) { int status = 0; if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { switch (hw->device_id) { case I40E_DEV_ID_SFP_XL710: case I40E_DEV_ID_QEMU: case I40E_DEV_ID_KX_B: case I40E_DEV_ID_KX_C: case I40E_DEV_ID_QSFP_A: case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: case I40E_DEV_ID_1G_BASE_T_BC: case I40E_DEV_ID_5G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_10G_BASE_T_BC: case I40E_DEV_ID_10G_B: case I40E_DEV_ID_10G_SFP: case I40E_DEV_ID_20G_KR2: case I40E_DEV_ID_20G_KR2_A: case I40E_DEV_ID_25G_B: case I40E_DEV_ID_25G_SFP28: case I40E_DEV_ID_X710_N3000: case I40E_DEV_ID_XXV710_N3000: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_KX_X722: case I40E_DEV_ID_QSFP_X722: case I40E_DEV_ID_SFP_X722: case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_SFP_I_X722: case I40E_DEV_ID_SFP_X722_A: hw->mac.type = I40E_MAC_X722; break; default: hw->mac.type = I40E_MAC_GENERIC; break; } } else { status = -ENODEV; } hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n", hw->mac.type, status); return status; } /** * i40e_aq_str - convert AQ err code to a string * @hw: pointer to the HW structure * @aq_err: the AQ error code to convert **/ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) { switch (aq_err) { case I40E_AQ_RC_OK: return "OK"; case I40E_AQ_RC_EPERM: return "I40E_AQ_RC_EPERM"; case I40E_AQ_RC_ENOENT: return "I40E_AQ_RC_ENOENT"; case I40E_AQ_RC_ESRCH: return "I40E_AQ_RC_ESRCH"; case I40E_AQ_RC_EINTR: return "I40E_AQ_RC_EINTR"; case I40E_AQ_RC_EIO: return "I40E_AQ_RC_EIO"; case I40E_AQ_RC_ENXIO: return "I40E_AQ_RC_ENXIO"; case I40E_AQ_RC_E2BIG: return "I40E_AQ_RC_E2BIG"; case I40E_AQ_RC_EAGAIN: return "I40E_AQ_RC_EAGAIN"; case I40E_AQ_RC_ENOMEM: return "I40E_AQ_RC_ENOMEM"; case I40E_AQ_RC_EACCES: return "I40E_AQ_RC_EACCES"; case I40E_AQ_RC_EFAULT: return "I40E_AQ_RC_EFAULT"; case I40E_AQ_RC_EBUSY: return "I40E_AQ_RC_EBUSY"; case I40E_AQ_RC_EEXIST: return "I40E_AQ_RC_EEXIST"; case I40E_AQ_RC_EINVAL: return "I40E_AQ_RC_EINVAL"; case I40E_AQ_RC_ENOTTY: return "I40E_AQ_RC_ENOTTY"; case I40E_AQ_RC_ENOSPC: return "I40E_AQ_RC_ENOSPC"; case I40E_AQ_RC_ENOSYS: return "I40E_AQ_RC_ENOSYS"; case I40E_AQ_RC_ERANGE: return "I40E_AQ_RC_ERANGE"; case I40E_AQ_RC_EFLUSHED: return "I40E_AQ_RC_EFLUSHED"; case I40E_AQ_RC_BAD_ADDR: return "I40E_AQ_RC_BAD_ADDR"; case I40E_AQ_RC_EMODE: return "I40E_AQ_RC_EMODE"; case I40E_AQ_RC_EFBIG: return "I40E_AQ_RC_EFBIG"; } snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); return hw->err_str; } /** * i40e_debug_aq * @hw: debug mask related to admin queue * @mask: debug mask * @desc: pointer to admin queue descriptor * @buffer: pointer to command buffer * @buf_len: max length of buffer * * Dumps debug log about adminq command with descriptor contents. 
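 * Nothing is printed unless the requested bits are also set in hw->debug_mask.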
**/ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, void *buffer, u16 buf_len) { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u32 effective_mask = hw->debug_mask & mask; char prefix[27]; u16 len; u8 *buf = (u8 *)buffer; if (!effective_mask || !desc) return; len = le16_to_cpu(aq_desc->datalen); i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", le16_to_cpu(aq_desc->opcode), le16_to_cpu(aq_desc->flags), le16_to_cpu(aq_desc->datalen), le16_to_cpu(aq_desc->retval)); i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, "\tcookie (h,l) 0x%08X 0x%08X\n", le32_to_cpu(aq_desc->cookie_high), le32_to_cpu(aq_desc->cookie_low)); i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, "\tparam (0,1) 0x%08X 0x%08X\n", le32_to_cpu(aq_desc->params.internal.param0), le32_to_cpu(aq_desc->params.internal.param1)); i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, "\taddr (h,l) 0x%08X 0x%08X\n", le32_to_cpu(aq_desc->params.external.addr_high), le32_to_cpu(aq_desc->params.external.addr_low)); if (buffer && buf_len != 0 && len != 0 && (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) { i40e_debug(hw, mask, "AQ CMD Buffer:\n"); if (buf_len < len) len = buf_len; snprintf(prefix, sizeof(prefix), "i40e %02x:%02x.%x: \t0x", hw->bus.bus_id, hw->bus.device, hw->bus.func); print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, buf, len, false); } } /** * i40e_check_asq_alive * @hw: pointer to the hw struct * * Returns true if Queue is enabled else false. **/ bool i40e_check_asq_alive(struct i40e_hw *hw) { if (hw->aq.asq.len) return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK); else return false; } /** * i40e_aq_queue_shutdown * @hw: pointer to the hw struct * @unloading: is the driver unloading itself * * Tell the Firmware that we're shutting down the AdminQ and whether * or not the driver is unloading as well. 
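 * No further AdminQ commands should be issued once this command completes.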
**/ int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading) { struct i40e_aq_desc desc; struct i40e_aqc_queue_shutdown *cmd = (struct i40e_aqc_queue_shutdown *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown); if (unloading) cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); return status; } /** * i40e_aq_get_set_rss_lut * @hw: pointer to the hardware structure * @vsi_id: vsi fw index * @pf_lut: for PF table set true, for VSI table set false * @lut: pointer to the lut buffer provided by the caller * @lut_size: size of the lut buffer * @set: set true to set the table, false to get the table * * Internal function to get or set RSS look up table **/ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, bool pf_lut, u8 *lut, u16 lut_size, bool set) { struct i40e_aq_desc desc; struct i40e_aqc_get_set_rss_lut *cmd_resp = (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; int status; if (set) i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_rss_lut); else i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_rss_lut); /* Indirect command */ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); cmd_resp->vsi_id = cpu_to_le16((u16)((vsi_id << I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); if (pf_lut) cmd_resp->flags |= cpu_to_le16((u16) ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); else cmd_resp->flags |= cpu_to_le16((u16) ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL); return status; } /** * i40e_aq_get_rss_lut * @hw: pointer to the hardware structure * @vsi_id: vsi fw index * @pf_lut: for PF table set true, for VSI table set false * @lut: pointer to the lut buffer provided by the caller * @lut_size: size of the lut buffer * * get the RSS lookup table, PF or VSI type **/ int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, bool pf_lut, u8 *lut, u16 lut_size) { return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, false); } /** * i40e_aq_set_rss_lut * @hw: pointer to the hardware structure * @vsi_id: vsi fw index * @pf_lut: for PF table set true, for VSI table set false * @lut: pointer to the lut buffer provided by the caller * @lut_size: size of the lut buffer * * set the RSS lookup table, PF or VSI type **/ int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, bool pf_lut, u8 *lut, u16 lut_size) { return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); } /** * i40e_aq_get_set_rss_key * @hw: pointer to the hw struct * @vsi_id: vsi fw index * @key: pointer to key info struct * @set: set true to set the key, false to get the key * * get the RSS key per VSI **/ static int i40e_aq_get_set_rss_key(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_get_set_rss_key_data *key, bool set) { struct i40e_aq_desc desc; struct i40e_aqc_get_set_rss_key *cmd_resp = (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); int status; if (set) i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_rss_key); else i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_rss_key); /* Indirect command */ desc.flags |= 
cpu_to_le16((u16)I40E_AQ_FLAG_BUF); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); cmd_resp->vsi_id = cpu_to_le16((u16)((vsi_id << I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); status = i40e_asq_send_command(hw, &desc, key, key_size, NULL); return status; } /** * i40e_aq_get_rss_key * @hw: pointer to the hw struct * @vsi_id: vsi fw index * @key: pointer to key info struct * **/ int i40e_aq_get_rss_key(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_get_set_rss_key_data *key) { return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); } /** * i40e_aq_set_rss_key * @hw: pointer to the hw struct * @vsi_id: vsi fw index * @key: pointer to key info struct * * set the RSS key per VSI **/ int i40e_aq_set_rss_key(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_get_set_rss_key_data *key) { return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); } /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the * hardware to a bit-field that can be used by SW to more easily determine the * packet type. * * Macros are used to shorten the table lines and make this table human * readable. * * We store the PTYPE in the top byte of the bit field - this is just so that * we can check that the table doesn't have a row missing, as the index into * the table should be the PTYPE. * * Typical work flow: * * IF NOT i40e_ptype_lookup[ptype].known * THEN * Packet is unknown * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP * Use the rest of the fields to look at the tunnels, inner protocols, etc * ELSE * Use the enum i40e_rx_l2_ptype to decode the packet type * ENDIF */ /* macro to make the table lines short, use explicit indexing with [PTYPE] */ #define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ [PTYPE] = { \ 1, \ I40E_RX_PTYPE_OUTER_##OUTER_IP, \ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ I40E_RX_PTYPE_##OUTER_FRAG, \ I40E_RX_PTYPE_TUNNEL_##T, \ I40E_RX_PTYPE_TUNNEL_END_##TE, \ I40E_RX_PTYPE_##TEF, \ I40E_RX_PTYPE_INNER_PROT_##I, \ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } #define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* shorter macros makes the table fit but are terse */ #define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG #define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG #define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC /* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */ struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = { /* L2 Packet types */ I40E_PTT_UNUSED_ENTRY(0), I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT_UNUSED_ENTRY(4), I40E_PTT_UNUSED_ENTRY(5), I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT_UNUSED_ENTRY(8), I40E_PTT_UNUSED_ENTRY(9), I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(19, L2, NONE, NOF, 
NONE, NONE, NOF, NONE, PAY3), I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), /* Non Tunneled IPv4 */ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(25), I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), /* IPv4 --> IPv4 */ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(32), I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), /* IPv4 --> IPv6 */ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(39), I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT */ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), /* IPv4 --> GRE/NAT --> IPv4 */ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(47), I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> IPv6 */ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(54), I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> MAC */ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(62), I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(69), I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> MAC/VLAN */ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), 
I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(77), I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(84), I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), /* Non Tunneled IPv6 */ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(91), I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), /* IPv6 --> IPv4 */ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(98), I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), /* IPv6 --> IPv6 */ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(105), I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT */ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> IPv4 */ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(113), I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> IPv6 */ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(120), I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC */ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(128), I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, 
PAY4), I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(135), I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC/VLAN */ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(143), I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(150), I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), /* unused entries */ [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; /** * i40e_init_shared_code - Initialize the shared code * @hw: pointer to hardware structure * * This assigns the MAC type and PHY code and inits the NVM. * Does not touch the hardware. This function must be called prior to any * other function in the shared code. The i40e_hw structure should be * memset to 0 prior to calling this function. 
The following fields in * hw structure should be filled in prior to calling this function: * hw_addr, back, device_id, vendor_id, subsystem_device_id, * subsystem_vendor_id, and revision_id **/ int i40e_init_shared_code(struct i40e_hw *hw) { u32 port, ari, func_rid; int status = 0; i40e_set_mac_type(hw); switch (hw->mac.type) { case I40E_MAC_XL710: case I40E_MAC_X722: break; default: return -ENODEV; } hw->phy.get_link_info = true; /* Determine port number and PF number*/ port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; hw->port = (u8)port; ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >> I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; func_rid = rd32(hw, I40E_PF_FUNC_RID); if (ari) hw->pf_id = (u8)(func_rid & 0xff); else hw->pf_id = (u8)(func_rid & 0x7); status = i40e_init_nvm(hw); return status; } /** * i40e_aq_mac_address_read - Retrieve the MAC addresses * @hw: pointer to the hw struct * @flags: a return indicator of what addresses were added to the addr store * @addrs: the requestor's mac addr store * @cmd_details: pointer to command details structure or NULL **/ static int i40e_aq_mac_address_read(struct i40e_hw *hw, u16 *flags, struct i40e_aqc_mac_address_read_data *addrs, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_mac_address_read *cmd_data = (struct i40e_aqc_mac_address_read *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read); desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF); status = i40e_asq_send_command(hw, &desc, addrs, sizeof(*addrs), cmd_details); *flags = le16_to_cpu(cmd_data->command_flags); return status; } /** * i40e_aq_mac_address_write - Change the MAC addresses * @hw: pointer to the hw struct * @flags: indicates which MAC to be written * @mac_addr: address to write * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_mac_address_write(struct i40e_hw *hw, u16 flags, u8 *mac_addr, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_mac_address_write *cmd_data = (struct i40e_aqc_mac_address_write *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_write); cmd_data->command_flags = cpu_to_le16(flags); cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]); cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) | ((u32)mac_addr[3] << 16) | ((u32)mac_addr[4] << 8) | mac_addr[5]); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_get_mac_addr - get MAC address * @hw: pointer to the HW structure * @mac_addr: pointer to MAC address * * Reads the adapter's MAC address from register **/ int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) { struct i40e_aqc_mac_address_read_data addrs; u16 flags = 0; int status; status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); if (flags & I40E_AQC_LAN_ADDR_VALID) ether_addr_copy(mac_addr, addrs.pf_lan_mac); return status; } /** * i40e_get_port_mac_addr - get Port MAC address * @hw: pointer to the HW structure * @mac_addr: pointer to Port MAC address * * Reads the adapter's Port MAC address **/ int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) { struct i40e_aqc_mac_address_read_data addrs; u16 flags = 0; int status; status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); if (status) return status; if (flags & I40E_AQC_PORT_ADDR_VALID) ether_addr_copy(mac_addr, addrs.port_mac); else status = -EINVAL; 
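	/* At this point status is 0 only when the AQ read succeeded and a
	 * valid port address was copied out; otherwise -EINVAL tells the
	 * caller that mac_addr was left untouched.
	 */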
return status; } /** * i40e_pre_tx_queue_cfg - pre tx queue configure * @hw: pointer to the HW structure * @queue: target PF queue index * @enable: state change request * * Handles hw requirement to indicate intention to enable * or disable target queue. **/ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) { u32 abs_queue_idx = hw->func_caps.base_queue + queue; u32 reg_block = 0; u32 reg_val; if (abs_queue_idx >= 128) { reg_block = abs_queue_idx / 128; abs_queue_idx %= 128; } reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); if (enable) reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK; else reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val); } /** * i40e_read_pba_string - Reads part number string from EEPROM * @hw: pointer to hardware structure * @pba_num: stores the part number string from the EEPROM * @pba_num_size: part number string buffer length * * Reads the part number string from the EEPROM. **/ int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, u32 pba_num_size) { u16 pba_word = 0; u16 pba_size = 0; u16 pba_ptr = 0; int status = 0; u16 i = 0; status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word); if (status || (pba_word != 0xFAFA)) { hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n"); return status; } status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr); if (status) { hw_dbg(hw, "Failed to read PBA Block pointer.\n"); return status; } status = i40e_read_nvm_word(hw, pba_ptr, &pba_size); if (status) { hw_dbg(hw, "Failed to read PBA Block size.\n"); return status; } /* Subtract one to get PBA word count (PBA Size word is included in * total size) */ pba_size--; if (pba_num_size < (((u32)pba_size * 2) + 1)) { hw_dbg(hw, "Buffer too small for PBA data.\n"); return -EINVAL; } for (i = 0; i < pba_size; i++) { status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word); if (status) { hw_dbg(hw, "Failed to read PBA Block word %d.\n", i); return status; } pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; pba_num[(i * 2) + 1] = pba_word & 0xFF; } pba_num[(pba_size * 2)] = '\0'; return status; } /** * i40e_get_media_type - Gets media type * @hw: pointer to the hardware structure **/ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) { enum i40e_media_type media; switch (hw->phy.link_info.phy_type) { case I40E_PHY_TYPE_10GBASE_SR: case I40E_PHY_TYPE_10GBASE_LR: case I40E_PHY_TYPE_1000BASE_SX: case I40E_PHY_TYPE_1000BASE_LX: case I40E_PHY_TYPE_40GBASE_SR4: case I40E_PHY_TYPE_40GBASE_LR4: case I40E_PHY_TYPE_25GBASE_LR: case I40E_PHY_TYPE_25GBASE_SR: media = I40E_MEDIA_TYPE_FIBER; break; case I40E_PHY_TYPE_100BASE_TX: case I40E_PHY_TYPE_1000BASE_T: case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS: case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS: case I40E_PHY_TYPE_10GBASE_T: media = I40E_MEDIA_TYPE_BASET; break; case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_40GBASE_CR4_CU: case I40E_PHY_TYPE_10GBASE_CR1: case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_10GBASE_SFPP_CU: case I40E_PHY_TYPE_40GBASE_AOC: case I40E_PHY_TYPE_10GBASE_AOC: case I40E_PHY_TYPE_25GBASE_CR: case I40E_PHY_TYPE_25GBASE_AOC: case I40E_PHY_TYPE_25GBASE_ACC: media = I40E_MEDIA_TYPE_DA; break; case I40E_PHY_TYPE_1000BASE_KX: case I40E_PHY_TYPE_10GBASE_KX4: case I40E_PHY_TYPE_10GBASE_KR: case I40E_PHY_TYPE_40GBASE_KR4: case I40E_PHY_TYPE_20GBASE_KR2: case I40E_PHY_TYPE_25GBASE_KR: media = 
I40E_MEDIA_TYPE_BACKPLANE; break; case I40E_PHY_TYPE_SGMII: case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: case I40E_PHY_TYPE_XLAUI: case I40E_PHY_TYPE_XLPPI: default: media = I40E_MEDIA_TYPE_UNKNOWN; break; } return media; } /** * i40e_poll_globr - Poll for Global Reset completion * @hw: pointer to the hardware structure * @retry_limit: how many times to retry before failure **/ static int i40e_poll_globr(struct i40e_hw *hw, u32 retry_limit) { u32 cnt, reg = 0; for (cnt = 0; cnt < retry_limit; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) return 0; msleep(100); } hw_dbg(hw, "Global reset failed.\n"); hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg); return -EIO; } #define I40E_PF_RESET_WAIT_COUNT_A0 200 #define I40E_PF_RESET_WAIT_COUNT 200 /** * i40e_pf_reset - Reset the PF * @hw: pointer to the hardware structure * * Assuming someone else has triggered a global reset, * assure the global reset is complete and then reset the PF **/ int i40e_pf_reset(struct i40e_hw *hw) { u32 cnt = 0; u32 cnt1 = 0; u32 reg = 0; u32 grst_del; /* Poll for Global Reset steady state in case of recent GRST. * The grst delay value is in 100ms units, and we'll wait a * couple counts longer to be sure we don't just miss the end. */ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; /* It can take upto 15 secs for GRST steady state. * Bump it to 16 secs max to be safe. */ grst_del = grst_del * 20; for (cnt = 0; cnt < grst_del; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) break; msleep(100); } if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { hw_dbg(hw, "Global reset polling failed to complete.\n"); return -EIO; } /* Now Wait for the FW to be ready */ for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { reg = rd32(hw, I40E_GLNVM_ULD); reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) { hw_dbg(hw, "Core and Global modules ready %d\n", cnt1); break; } usleep_range(10000, 20000); } if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { hw_dbg(hw, "wait for FW Reset complete timedout\n"); hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg); return -EIO; } /* If there was a Global Reset in progress when we got here, * we don't need to do the PF Reset */ if (!cnt) { u32 reg2 = 0; if (hw->revision_id == 0) cnt = I40E_PF_RESET_WAIT_COUNT_A0; else cnt = I40E_PF_RESET_WAIT_COUNT; reg = rd32(hw, I40E_PFGEN_CTRL); wr32(hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); for (; cnt; cnt--) { reg = rd32(hw, I40E_PFGEN_CTRL); if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) break; reg2 = rd32(hw, I40E_GLGEN_RSTAT); if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) break; usleep_range(1000, 2000); } if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { if (i40e_poll_globr(hw, grst_del)) return -EIO; } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { hw_dbg(hw, "PF reset polling failed to complete.\n"); return -EIO; } } i40e_clear_pxe_mode(hw); return 0; } /** * i40e_clear_hw - clear out any left over hw state * @hw: pointer to the hw struct * * Clear queues and interrupts, typically called at init time, * but after the capabilities have been found so we know how many * queues and msix vectors have been allocated. 
**/ void i40e_clear_hw(struct i40e_hw *hw) { u32 num_queues, base_queue; u32 num_pf_int; u32 num_vf_int; u32 num_vfs; u32 i, j; u32 val; u32 eol = 0x7ff; /* get number of interrupts, queues, and VFs */ val = rd32(hw, I40E_GLPCI_CNF2); num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; val = rd32(hw, I40E_PFLAN_QALLOC); base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> I40E_PFLAN_QALLOC_LASTQ_SHIFT; if (val & I40E_PFLAN_QALLOC_VALID_MASK) num_queues = (j - base_queue) + 1; else num_queues = 0; val = rd32(hw, I40E_PF_VT_PFALLOC); i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> I40E_PF_VT_PFALLOC_LASTVF_SHIFT; if (val & I40E_PF_VT_PFALLOC_VALID_MASK) num_vfs = (j - i) + 1; else num_vfs = 0; /* stop all the interrupts */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; for (i = 0; i < num_pf_int - 2; i++) wr32(hw, I40E_PFINT_DYN_CTLN(i), val); /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */ val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; wr32(hw, I40E_PFINT_LNKLST0, val); for (i = 0; i < num_pf_int - 2; i++) wr32(hw, I40E_PFINT_LNKLSTN(i), val); val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT; for (i = 0; i < num_vfs; i++) wr32(hw, I40E_VPINT_LNKLST0(i), val); for (i = 0; i < num_vf_int - 2; i++) wr32(hw, I40E_VPINT_LNKLSTN(i), val); /* warn the HW of the coming Tx disables */ for (i = 0; i < num_queues; i++) { u32 abs_queue_idx = base_queue + i; u32 reg_block = 0; if (abs_queue_idx >= 128) { reg_block = abs_queue_idx / 128; abs_queue_idx %= 128; } val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val); } udelay(400); /* stop all the queues */ for (i = 0; i < num_queues; i++) { wr32(hw, I40E_QINT_TQCTL(i), 0); wr32(hw, I40E_QTX_ENA(i), 0); wr32(hw, I40E_QINT_RQCTL(i), 0); wr32(hw, I40E_QRX_ENA(i), 0); } /* short wait for all queue disables to settle */ udelay(50); } /** * i40e_clear_pxe_mode - clear pxe operations mode * @hw: pointer to the hw struct * * Make sure all PXE mode settings are cleared, including things * like descriptor fetch/write-back mode. 
**/ void i40e_clear_pxe_mode(struct i40e_hw *hw) { u32 reg; if (i40e_check_asq_alive(hw)) i40e_aq_clear_pxe_mode(hw, NULL); /* Clear single descriptor fetch/write-back mode */ reg = rd32(hw, I40E_GLLAN_RCTL_0); if (hw->revision_id == 0) { /* As a work around clear PXE_MODE instead of setting it */ wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK))); } else { wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK)); } } /** * i40e_led_is_mine - helper to find matching led * @hw: pointer to the hw struct * @idx: index into GPIO registers * * returns: 0 if no match, otherwise the value of the GPIO_CTL register */ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) { u32 gpio_val = 0; u32 port; if (!I40E_IS_X710TL_DEVICE(hw->device_id) && !hw->func_caps.led[idx]) return 0; gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx)); port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; /* if PRT_NUM_NA is 1 then this LED is not port specific, OR * if it is not our port then ignore */ if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) || (port != hw->port)) return 0; return gpio_val; } #define I40E_FW_LED BIT(4) #define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) #define I40E_LED0 22 #define I40E_PIN_FUNC_SDP 0x0 #define I40E_PIN_FUNC_LED 0x1 /** * i40e_led_get - return current on/off mode * @hw: pointer to the hw struct * * The value returned is the 'mode' field as defined in the * GPIO register definitions: 0x0 = off, 0xf = on, and other * values are variations of possible behaviors relating to * blink, link, and wire. **/ u32 i40e_led_get(struct i40e_hw *hw) { u32 mode = 0; int i; /* as per the documentation GPIO 22-29 are the LED * GPIO pins named LED0..LED7 */ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { u32 gpio_val = i40e_led_is_mine(hw, i); if (!gpio_val) continue; mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; break; } return mode; } /** * i40e_led_set - set new on/off mode * @hw: pointer to the hw struct * @mode: 0=off, 0xf=on (else see manual for mode details) * @blink: true if the LED should blink when on, false if steady * * if this function is used to turn on the blink it should * be used to disable the blink when restoring the original state. 
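 *
 * Illustrative sketch (not taken from the driver itself; the delay value is a
 * placeholder): identify a port by blinking its LED, then restore the
 * previous state:
 *
 *	u32 old_mode = i40e_led_get(hw);
 *
 *	i40e_led_set(hw, 0xf, true);		// LED on, blinking
 *	msleep(identify_time_ms);		// placeholder delay
 *	i40e_led_set(hw, old_mode, false);	// restore mode, blink off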
**/ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) { int i; if (mode & ~I40E_LED_MODE_VALID) { hw_dbg(hw, "invalid mode passed in %X\n", mode); return; } /* as per the documentation GPIO 22-29 are the LED * GPIO pins named LED0..LED7 */ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { u32 gpio_val = i40e_led_is_mine(hw, i); if (!gpio_val) continue; if (I40E_IS_X710TL_DEVICE(hw->device_id)) { u32 pin_func = 0; if (mode & I40E_FW_LED) pin_func = I40E_PIN_FUNC_SDP; else pin_func = I40E_PIN_FUNC_LED; gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK; gpio_val |= ((pin_func << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) & I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK); } gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; /* this & is a bit of paranoia, but serves as a range check */ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); if (blink) gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); else gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); break; } } /* Admin command wrappers */ /** * i40e_aq_get_phy_capabilities * @hw: pointer to the hw struct * @abilities: structure for PHY capabilities to be filled * @qualified_modules: report Qualified Modules * @report_init: report init capabilities (active are default) * @cmd_details: pointer to command details structure or NULL * * Returns the various PHY abilities supported on the Port. **/ int i40e_aq_get_phy_capabilities(struct i40e_hw *hw, bool qualified_modules, bool report_init, struct i40e_aq_get_phy_abilities_resp *abilities, struct i40e_asq_cmd_details *cmd_details) { u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp); u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0; struct i40e_aq_desc desc; int status; if (!abilities) return -EINVAL; do { i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_phy_abilities); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); if (abilities_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); if (qualified_modules) desc.params.external.param0 |= cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES); if (report_init) desc.params.external.param0 |= cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES); status = i40e_asq_send_command(hw, &desc, abilities, abilities_size, cmd_details); switch (hw->aq.asq_last_status) { case I40E_AQ_RC_EIO: status = -EIO; break; case I40E_AQ_RC_EAGAIN: usleep_range(1000, 2000); total_delay++; status = -EIO; break; /* also covers I40E_AQ_RC_OK */ default: break; } } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) && (total_delay < max_delay)); if (status) return status; if (report_init) { if (hw->mac.type == I40E_MAC_XL710 && hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { status = i40e_aq_get_link_info(hw, true, NULL, NULL); } else { hw->phy.phy_types = le32_to_cpu(abilities->phy_type); hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32); } } return status; } /** * i40e_aq_set_phy_config * @hw: pointer to the hw struct * @config: structure with PHY configuration to be set * @cmd_details: pointer to command details structure or NULL * * Set the various PHY configuration parameters * supported on the Port.One or more of the Set PHY config parameters may be * ignored in an MFP mode as the PF may not have the privilege to set some * of the PHY Config parameters. This status will be indicated by the * command response. 
**/ int i40e_aq_set_phy_config(struct i40e_hw *hw, struct i40e_aq_set_phy_config *config, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aq_set_phy_config *cmd = (struct i40e_aq_set_phy_config *)&desc.params.raw; int status; if (!config) return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_config); *cmd = *config; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } static noinline_for_stack int i40e_set_fc_status(struct i40e_hw *hw, struct i40e_aq_get_phy_abilities_resp *abilities, bool atomic_restart) { struct i40e_aq_set_phy_config config; enum i40e_fc_mode fc_mode = hw->fc.requested_mode; u8 pause_mask = 0x0; switch (fc_mode) { case I40E_FC_FULL: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; break; case I40E_FC_RX_PAUSE: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; break; case I40E_FC_TX_PAUSE: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; break; default: break; } memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); /* clear the old pause settings */ config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & ~(I40E_AQ_PHY_FLAG_PAUSE_RX); /* set the new abilities */ config.abilities |= pause_mask; /* If the abilities have changed, then set the new config */ if (config.abilities == abilities->abilities) return 0; /* Auto restart link so settings take effect */ if (atomic_restart) config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; /* Copy over all the old settings */ config.phy_type = abilities->phy_type; config.phy_type_ext = abilities->phy_type_ext; config.link_speed = abilities->link_speed; config.eee_capability = abilities->eee_capability; config.eeer = abilities->eeer_val; config.low_power_ctrl = abilities->d3_lpan; config.fec_config = abilities->fec_cfg_curr_mod_ext_info & I40E_AQ_PHY_FEC_CONFIG_MASK; return i40e_aq_set_phy_config(hw, &config, NULL); } /** * i40e_set_fc * @hw: pointer to the hw struct * @aq_failures: buffer to return AdminQ failure information * @atomic_restart: whether to enable atomic link restart * * Set the requested flow control mode using set_phy_config. 
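 *
 * Illustrative sketch (not taken from the driver itself; the error handler is
 * hypothetical): request full flow control with an atomic link restart and
 * check which stage failed:
 *
 *	u8 aq_failures = 0;
 *	int err;
 *
 *	hw->fc.requested_mode = I40E_FC_FULL;
 *	err = i40e_set_fc(hw, &aq_failures, true);
 *	if (err && (aq_failures & I40E_SET_FC_AQ_FAIL_SET))
 *		handle_set_phy_config_failure();	// hypothetical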
**/ int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, bool atomic_restart) { struct i40e_aq_get_phy_abilities_resp abilities; int status; *aq_failures = 0x0; /* Get the current phy config */ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); if (status) { *aq_failures |= I40E_SET_FC_AQ_FAIL_GET; return status; } status = i40e_set_fc_status(hw, &abilities, atomic_restart); if (status) *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; /* Update the link info */ status = i40e_update_link_info(hw); if (status) { /* Wait a little bit (on 40G cards it sometimes takes a really * long time for link to come back from the atomic reset) * and try once more */ msleep(1000); status = i40e_update_link_info(hw); } if (status) *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; return status; } /** * i40e_aq_clear_pxe_mode * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * * Tell the firmware that the driver is taking over from PXE **/ int i40e_aq_clear_pxe_mode(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_clear_pxe *cmd = (struct i40e_aqc_clear_pxe *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_clear_pxe_mode); cmd->rx_cnt = 0x2; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); wr32(hw, I40E_GLLAN_RCTL_0, 0x1); return status; } /** * i40e_aq_set_link_restart_an * @hw: pointer to the hw struct * @enable_link: if true: enable link, if false: disable link * @cmd_details: pointer to command details structure or NULL * * Sets up the link and restarts the Auto-Negotiation over the link. **/ int i40e_aq_set_link_restart_an(struct i40e_hw *hw, bool enable_link, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_link_restart_an *cmd = (struct i40e_aqc_set_link_restart_an *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_link_restart_an); cmd->command = I40E_AQ_PHY_RESTART_AN; if (enable_link) cmd->command |= I40E_AQ_PHY_LINK_ENABLE; else cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_link_info * @hw: pointer to the hw struct * @enable_lse: enable/disable LinkStatusEvent reporting * @link: pointer to link status structure - optional * @cmd_details: pointer to command details structure or NULL * * Returns the link status of the adapter. 
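 *
 * Illustrative sketch (not taken from the driver itself; the helper is
 * hypothetical): enable link-status-event reporting and capture the current
 * state in one call:
 *
 *	struct i40e_link_status link;
 *
 *	if (!i40e_aq_get_link_info(hw, true, &link, NULL) &&
 *	    (link.link_info & I40E_AQ_LINK_UP))
 *		report_link_up();	// hypothetical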
**/ int i40e_aq_get_link_info(struct i40e_hw *hw, bool enable_lse, struct i40e_link_status *link, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_link_status *resp = (struct i40e_aqc_get_link_status *)&desc.params.raw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; bool tx_pause, rx_pause; u16 command_flags; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); if (enable_lse) command_flags = I40E_AQ_LSE_ENABLE; else command_flags = I40E_AQ_LSE_DISABLE; resp->command_flags = cpu_to_le16(command_flags); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status) goto aq_get_link_info_exit; /* save off old link status information */ hw->phy.link_info_old = *hw_link_info; /* update link status */ hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; hw->phy.media_type = i40e_get_media_type(hw); hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; hw_link_info->link_info = resp->link_info; hw_link_info->an_info = resp->an_info; hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | I40E_AQ_CONFIG_FEC_RS_ENA); hw_link_info->ext_info = resp->ext_info; hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; /* update fc info */ tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); if (tx_pause & rx_pause) hw->fc.current_mode = I40E_FC_FULL; else if (tx_pause) hw->fc.current_mode = I40E_FC_TX_PAUSE; else if (rx_pause) hw->fc.current_mode = I40E_FC_RX_PAUSE; else hw->fc.current_mode = I40E_FC_NONE; if (resp->config & I40E_AQ_CONFIG_CRC_ENA) hw_link_info->crc_enable = true; else hw_link_info->crc_enable = false; if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED)) hw_link_info->lse_enable = true; else hw_link_info->lse_enable = false; if ((hw->mac.type == I40E_MAC_XL710) && (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE && hw->mac.type != I40E_MAC_X722) { __le32 tmp; memcpy(&tmp, resp->link_type, sizeof(tmp)); hw->phy.phy_types = le32_to_cpu(tmp); hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); } /* save link status information */ if (link) *link = *hw_link_info; /* flag cleared so helper functions don't call AQ again */ hw->phy.get_link_info = false; aq_get_link_info_exit: return status; } /** * i40e_aq_set_phy_int_mask * @hw: pointer to the hw struct * @mask: interrupt mask to be set * @cmd_details: pointer to command details structure or NULL * * Set link interrupt mask. 
**/ int i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_phy_int_mask *cmd = (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_int_mask); cmd->event_mask = cpu_to_le16(mask); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_mac_loopback * @hw: pointer to the HW struct * @ena_lpbk: Enable or Disable loopback * @cmd_details: pointer to command details structure or NULL * * Enable/disable loopback on a given port */ int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_lb_mode *cmd = (struct i40e_aqc_set_lb_mode *)&desc.params.raw; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes); if (ena_lpbk) { if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER) cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY); else cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL); } return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); } /** * i40e_aq_set_phy_debug * @hw: pointer to the hw struct * @cmd_flags: debug command flags * @cmd_details: pointer to command details structure or NULL * * Reset the external PHY. **/ int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_phy_debug *cmd = (struct i40e_aqc_set_phy_debug *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_debug); cmd->command_flags = cmd_flags; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_is_aq_api_ver_ge * @aq: pointer to AdminQ info containing HW API version to compare * @maj: API major value * @min: API minor value * * Assert whether current HW API version is greater/equal than provided. **/ static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj, u16 min) { return (aq->api_maj_ver > maj || (aq->api_maj_ver == maj && aq->api_min_ver >= min)); } /** * i40e_aq_add_vsi * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL * * Add a VSI context to the hardware. 
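 *
 * Illustrative sketch (field values are placeholders, not taken from the
 * driver itself): add a VSI under a given uplink and read back the SEID the
 * firmware assigned:
 *
 *	struct i40e_vsi_context ctx = {};
 *
 *	ctx.uplink_seid = uplink_seid;
 *	ctx.connection_type = 0x1;	// "normal" connection, placeholder
 *	ctx.flags = 0;
 *	if (!i40e_aq_add_vsi(hw, &ctx, NULL))
 *		new_vsi_seid = ctx.seid;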
**/ int i40e_aq_add_vsi(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vsi); cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); cmd->connection_type = vsi_ctx->connection_type; cmd->vf_id = vsi_ctx->vf_num; cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cmd_details, true); if (status) goto aq_add_vsi_exit; vsi_ctx->seid = le16_to_cpu(resp->seid); vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); aq_add_vsi_exit: return status; } /** * i40e_aq_set_default_vsi * @hw: pointer to the hw struct * @seid: vsi number * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *) &desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_clear_default_vsi * @hw: pointer to the hw struct * @seid: vsi number * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *) &desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); cmd->promiscuous_flags = cpu_to_le16(0); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_unicast_promiscuous * @hw: pointer to the hw struct * @seid: vsi number * @set: set unicast promiscuous enable/disable * @cmd_details: pointer to command details structure or NULL * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc **/ int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details, bool rx_only_promisc) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; u16 flags = 0; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); 
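	/* Note that on API 1.5+ the RX_ONLY bit is marked valid even when
	 * promiscuous mode is being cleared, so a previously programmed
	 * rx-only setting is cleared along with it.
	 */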
cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_multicast_promiscuous * @hw: pointer to the hw struct * @seid: vsi number * @set: set multicast promiscuous enable/disable * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; u16 flags = 0; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set) flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_mc_promisc_on_vlan * @hw: pointer to the hw struct * @seid: vsi number * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; u16 flags = 0; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); cmd->seid = cpu_to_le16(seid); cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, cmd_details, true); return status; } /** * i40e_aq_set_vsi_uc_promisc_on_vlan * @hw: pointer to the hw struct * @seid: vsi number * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; u16 flags = 0; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (enable) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); cmd->seid = cpu_to_le16(seid); cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, cmd_details, true); return status; } /** * i40e_aq_set_vsi_bc_promisc_on_vlan * @hw: pointer to the hw struct * @seid: vsi number * @enable: set broadcast promiscuous enable/disable for a given VLAN * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag * @cmd_details: pointer to 
command details structure or NULL **/ int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, u16 seid, bool enable, u16 vid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; u16 flags = 0; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST; cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); cmd->seid = cpu_to_le16(seid); cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_broadcast * @hw: pointer to the hw struct * @seid: vsi number * @set_filter: true to set filter, false to clear filter * @cmd_details: pointer to command details structure or NULL * * Set or clear the broadcast promiscuous flag (filter) for a given VSI. **/ int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, u16 seid, bool set_filter, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (set_filter) cmd->promiscuous_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); else cmd->promiscuous_flags &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting * @hw: pointer to the hw struct * @seid: vsi number * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, u16 seid, bool enable, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; u16 flags = 0; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); if (enable) flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_vsi_params - get VSI configuration info * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_get_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_vsi_parameters); cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), NULL); if (status) goto aq_get_vsi_params_exit; 
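	/* On success, copy the VSI identifiers and the used/free VSI pool
	 * counters out of the completion descriptor.
	 */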
vsi_ctx->seid = le16_to_cpu(resp->seid); vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); aq_get_vsi_params_exit: return status; } /** * i40e_aq_update_vsi_params * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL * * Update a VSI context. **/ int i40e_aq_update_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_vsi_parameters); cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cmd_details, true); vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); return status; } /** * i40e_aq_get_switch_config * @hw: pointer to the hardware structure * @buf: pointer to the result buffer * @buf_size: length of input buffer * @start_seid: seid to start for the report, 0 == beginning * @cmd_details: pointer to command details structure or NULL * * Fill the buf with switch configuration returned from AdminQ command **/ int i40e_aq_get_switch_config(struct i40e_hw *hw, struct i40e_aqc_get_switch_config_resp *buf, u16 buf_size, u16 *start_seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_switch_seid *scfg = (struct i40e_aqc_switch_seid *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_switch_config); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); scfg->seid = cpu_to_le16(*start_seid); status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details); *start_seid = le16_to_cpu(scfg->seid); return status; } /** * i40e_aq_set_switch_config * @hw: pointer to the hardware structure * @flags: bit flag values to set * @mode: cloud filter mode * @valid_flags: which bit flags to set * @mode: cloud filter mode * @cmd_details: pointer to command details structure or NULL * * Set switch configuration bits **/ int i40e_aq_set_switch_config(struct i40e_hw *hw, u16 flags, u16 valid_flags, u8 mode, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_switch_config *scfg = (struct i40e_aqc_set_switch_config *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_switch_config); scfg->flags = cpu_to_le16(flags); scfg->valid_flags = cpu_to_le16(valid_flags); scfg->mode = mode; if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) { scfg->switch_tag = cpu_to_le16(hw->switch_tag); scfg->first_tag = cpu_to_le16(hw->first_tag); scfg->second_tag = cpu_to_le16(hw->second_tag); } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_firmware_version * @hw: pointer to the hw struct * @fw_major_version: firmware major version * @fw_minor_version: firmware minor version * @fw_build: firmware build number * @api_major_version: major queue version * @api_minor_version: minor queue 
version * @cmd_details: pointer to command details structure or NULL * * Get the firmware version from the admin queue commands **/ int i40e_aq_get_firmware_version(struct i40e_hw *hw, u16 *fw_major_version, u16 *fw_minor_version, u32 *fw_build, u16 *api_major_version, u16 *api_minor_version, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_version *resp = (struct i40e_aqc_get_version *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { if (fw_major_version) *fw_major_version = le16_to_cpu(resp->fw_major); if (fw_minor_version) *fw_minor_version = le16_to_cpu(resp->fw_minor); if (fw_build) *fw_build = le32_to_cpu(resp->fw_build); if (api_major_version) *api_major_version = le16_to_cpu(resp->api_major); if (api_minor_version) *api_minor_version = le16_to_cpu(resp->api_minor); } return status; } /** * i40e_aq_send_driver_version * @hw: pointer to the hw struct * @dv: driver's major, minor version * @cmd_details: pointer to command details structure or NULL * * Send the driver version to the firmware **/ int i40e_aq_send_driver_version(struct i40e_hw *hw, struct i40e_driver_version *dv, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_driver_version *cmd = (struct i40e_aqc_driver_version *)&desc.params.raw; int status; u16 len; if (dv == NULL) return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); cmd->driver_major_ver = dv->major_version; cmd->driver_minor_ver = dv->minor_version; cmd->driver_build_ver = dv->build_version; cmd->driver_subbuild_ver = dv->subbuild_version; len = 0; while (len < sizeof(dv->driver_string) && (dv->driver_string[len] < 0x80) && dv->driver_string[len]) len++; status = i40e_asq_send_command(hw, &desc, dv->driver_string, len, cmd_details); return status; } /** * i40e_get_link_status - get status of the HW network link * @hw: pointer to the hw struct * @link_up: pointer to bool (true/false = linkup/linkdown) * * Variable link_up true if link is up, false if link is down. 
 * The variable link_up is invalid if returned value of status != 0
 *
 * Side effect: LinkStatusEvent reporting becomes enabled
 **/
int i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
{
	int status = 0;

	if (hw->phy.get_link_info) {
		status = i40e_update_link_info(hw);

		if (status)
			i40e_debug(hw, I40E_DEBUG_LINK,
				   "get link failed: status %d\n", status);
	}

	*link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;

	return status;
}

/**
 * i40e_update_link_info - update status of the HW network link
 * @hw: pointer to the hw struct
 **/
noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	int status = 0;

	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
	if (status)
		return status;

	/* extra checking needed to ensure link info to user is timely */
	if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
	     !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
		status = i40e_aq_get_phy_capabilities(hw, false, false,
						      &abilities, NULL);
		if (status)
			return status;

		if (abilities.fec_cfg_curr_mod_ext_info &
		    I40E_AQ_ENABLE_FEC_AUTO)
			hw->phy.link_info.req_fec_info =
				(I40E_AQ_REQUEST_FEC_KR |
				 I40E_AQ_REQUEST_FEC_RS);
		else
			hw->phy.link_info.req_fec_info =
				abilities.fec_cfg_curr_mod_ext_info &
				(I40E_AQ_REQUEST_FEC_KR |
				 I40E_AQ_REQUEST_FEC_RS);

		memcpy(hw->phy.link_info.module_type, &abilities.module_type,
		       sizeof(hw->phy.link_info.module_type));
	}

	return status;
}

/**
 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
 * @hw: pointer to the hw struct
 * @uplink_seid: the MAC or other gizmo SEID
 * @downlink_seid: the VSI SEID
 * @enabled_tc: bitmap of TCs to be enabled
 * @default_port: true for default port VSI, false for control port
 * @veb_seid: pointer to where to put the resulting VEB SEID
 * @enable_stats: true to turn on VEB stats
 * @cmd_details: pointer to command details structure or NULL
 *
 * This asks the FW to add a VEB between the uplink and downlink
 * elements. If the uplink SEID is 0, this will be a floating VEB.
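 *
 * Illustrative sketch (SEID and TC values are placeholders, not taken from
 * the driver itself): create a data-port VEB between a MAC uplink and a VSI
 * and collect the SEID the firmware assigns:
 *
 *	u16 veb_seid = 0;
 *
 *	err = i40e_aq_add_veb(hw, mac_seid, vsi_seid, enabled_tc,
 *			      false, &veb_seid, true, NULL);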
**/ int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc, bool default_port, u16 *veb_seid, bool enable_stats, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_veb *cmd = (struct i40e_aqc_add_veb *)&desc.params.raw; struct i40e_aqc_add_veb_completion *resp = (struct i40e_aqc_add_veb_completion *)&desc.params.raw; u16 veb_flags = 0; int status; /* SEIDs need to either both be set or both be 0 for floating VEB */ if (!!uplink_seid != !!downlink_seid) return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); cmd->uplink_seid = cpu_to_le16(uplink_seid); cmd->downlink_seid = cpu_to_le16(downlink_seid); cmd->enable_tcs = enabled_tc; if (!uplink_seid) veb_flags |= I40E_AQC_ADD_VEB_FLOATING; if (default_port) veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; else veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; /* reverse logic here: set the bitflag to disable the stats */ if (!enable_stats) veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; cmd->veb_flags = cpu_to_le16(veb_flags); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && veb_seid) *veb_seid = le16_to_cpu(resp->veb_seid); return status; } /** * i40e_aq_get_veb_parameters - Retrieve VEB parameters * @hw: pointer to the hw struct * @veb_seid: the SEID of the VEB to query * @switch_id: the uplink switch id * @floating: set to true if the VEB is floating * @statistic_index: index of the stats counter block for this VEB * @vebs_used: number of VEB's used by function * @vebs_free: total VEB's not reserved by any function * @cmd_details: pointer to command details structure or NULL * * This retrieves the parameters for a particular VEB, specified by * uplink_seid, and returns them to the caller. **/ int i40e_aq_get_veb_parameters(struct i40e_hw *hw, u16 veb_seid, u16 *switch_id, bool *floating, u16 *statistic_index, u16 *vebs_used, u16 *vebs_free, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_veb_parameters_completion *cmd_resp = (struct i40e_aqc_get_veb_parameters_completion *) &desc.params.raw; int status; if (veb_seid == 0) return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_veb_parameters); cmd_resp->seid = cpu_to_le16(veb_seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status) goto get_veb_exit; if (switch_id) *switch_id = le16_to_cpu(cmd_resp->switch_id); if (statistic_index) *statistic_index = le16_to_cpu(cmd_resp->statistic_index); if (vebs_used) *vebs_used = le16_to_cpu(cmd_resp->vebs_used); if (vebs_free) *vebs_free = le16_to_cpu(cmd_resp->vebs_free); if (floating) { u16 flags = le16_to_cpu(cmd_resp->veb_flags); if (flags & I40E_AQC_ADD_VEB_FLOATING) *floating = true; else *floating = false; } get_veb_exit: return status; } /** * i40e_prepare_add_macvlan * @mv_list: list of macvlans to be added * @desc: pointer to AQ descriptor structure * @count: length of the list * @seid: VSI for the mac address * * Internal helper function that prepares the add macvlan request * and returns the buffer size. 
**/ static u16 i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list, struct i40e_aq_desc *desc, u16 count, u16 seid) { struct i40e_aqc_macvlan *cmd = (struct i40e_aqc_macvlan *)&desc->params.raw; u16 buf_size; int i; buf_size = count * sizeof(*mv_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan); cmd->num_addresses = cpu_to_le16(count); cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); cmd->seid[1] = 0; cmd->seid[2] = 0; for (i = 0; i < count; i++) if (is_multicast_ether_addr(mv_list[i].mac_addr)) mv_list[i].flags |= cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); return buf_size; } /** * i40e_aq_add_macvlan * @hw: pointer to the hw struct * @seid: VSI for the mac address * @mv_list: list of macvlans to be added * @count: length of the list * @cmd_details: pointer to command details structure or NULL * * Add MAC/VLAN addresses to the HW filtering **/ int i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; u16 buf_size; if (count == 0 || !mv_list || !hw) return -EINVAL; buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size, cmd_details, true); } /** * i40e_aq_add_macvlan_v2 * @hw: pointer to the hw struct * @seid: VSI for the mac address * @mv_list: list of macvlans to be added * @count: length of the list * @cmd_details: pointer to command details structure or NULL * @aq_status: pointer to Admin Queue status return value * * Add MAC/VLAN addresses to the HW filtering. * The _v2 version returns the last Admin Queue status in aq_status * to avoid race conditions in access to hw->aq.asq_last_status. * It also calls _v2 versions of asq_send_command functions to * get the aq_status on the stack. 
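 *
 * Illustrative sketch (the filter flag name is an assumption, it is not
 * defined in this file): add a single MAC filter and inspect the per-call AQ
 * status:
 *
 *	struct i40e_aqc_add_macvlan_element_data elem = {};
 *	enum i40e_admin_queue_err aq_status;
 *
 *	ether_addr_copy(elem.mac_addr, mac);
 *	elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
 *	err = i40e_aq_add_macvlan_v2(hw, vsi_seid, &elem, 1, NULL, &aq_status);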
**/ int i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details, enum i40e_admin_queue_err *aq_status) { struct i40e_aq_desc desc; u16 buf_size; if (count == 0 || !mv_list || !hw) return -EINVAL; buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size, cmd_details, true, aq_status); } /** * i40e_aq_remove_macvlan * @hw: pointer to the hw struct * @seid: VSI for the mac address * @mv_list: list of macvlans to be removed * @count: length of the list * @cmd_details: pointer to command details structure or NULL * * Remove MAC/VLAN addresses from the HW filtering **/ int i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, struct i40e_aqc_remove_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_macvlan *cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; u16 buf_size; int status; if (count == 0 || !mv_list || !hw) return -EINVAL; buf_size = count * sizeof(*mv_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); cmd->num_addresses = cpu_to_le16(count); cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); cmd->seid[1] = 0; cmd->seid[2] = 0; desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size, cmd_details, true); return status; } /** * i40e_aq_remove_macvlan_v2 * @hw: pointer to the hw struct * @seid: VSI for the mac address * @mv_list: list of macvlans to be removed * @count: length of the list * @cmd_details: pointer to command details structure or NULL * @aq_status: pointer to Admin Queue status return value * * Remove MAC/VLAN addresses from the HW filtering. * The _v2 version returns the last Admin Queue status in aq_status * to avoid race conditions in access to hw->aq.asq_last_status. * It also calls _v2 versions of asq_send_command functions to * get the aq_status on the stack. 
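 *
 * Removal mirrors the add sketch above but uses the remove element layout
 * (illustrative only; the flag value is assumed from the admin queue
 * definitions and "mac"/"vsi_seid" are caller-side):
 *
 *    struct i40e_aqc_remove_macvlan_element_data elem = {};
 *    enum i40e_admin_queue_err aq_status;
 *
 *    ether_addr_copy(elem.mac_addr, mac);
 *    elem.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
 *    ret = i40e_aq_remove_macvlan_v2(hw, vsi_seid, &elem, 1, NULL,
 *                                    &aq_status);
 *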
**/ int i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid, struct i40e_aqc_remove_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details, enum i40e_admin_queue_err *aq_status) { struct i40e_aqc_macvlan *cmd; struct i40e_aq_desc desc; u16 buf_size; if (count == 0 || !mv_list || !hw) return -EINVAL; buf_size = count * sizeof(*mv_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; cmd->num_addresses = cpu_to_le16(count); cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); cmd->seid[1] = 0; cmd->seid[2] = 0; desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size, cmd_details, true, aq_status); } /** * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule * @hw: pointer to the hw struct * @opcode: AQ opcode for add or delete mirror rule * @sw_seid: Switch SEID (to which rule refers) * @rule_type: Rule Type (ingress/egress/VLAN) * @id: Destination VSI SEID or Rule ID * @count: length of the list * @mr_list: list of mirrored VSI SEIDs or VLAN IDs * @cmd_details: pointer to command details structure or NULL * @rule_id: Rule ID returned from FW * @rules_used: Number of rules used in internal switch * @rules_free: Number of rules free in internal switch * * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for * VEBs/VEPA elements only **/ static int i40e_mirrorrule_op(struct i40e_hw *hw, u16 opcode, u16 sw_seid, u16 rule_type, u16 id, u16 count, __le16 *mr_list, struct i40e_asq_cmd_details *cmd_details, u16 *rule_id, u16 *rules_used, u16 *rules_free) { struct i40e_aq_desc desc; struct i40e_aqc_add_delete_mirror_rule *cmd = (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; struct i40e_aqc_add_delete_mirror_rule_completion *resp = (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; u16 buf_size; int status; buf_size = count * sizeof(*mr_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, opcode); cmd->seid = cpu_to_le16(sw_seid); cmd->rule_type = cpu_to_le16(rule_type & I40E_AQC_MIRROR_RULE_TYPE_MASK); cmd->num_entries = cpu_to_le16(count); /* Dest VSI for add, rule_id for delete */ cmd->destination = cpu_to_le16(id); if (mr_list) { desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); } status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, cmd_details); if (!status || hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { if (rule_id) *rule_id = le16_to_cpu(resp->rule_id); if (rules_used) *rules_used = le16_to_cpu(resp->mirror_rules_used); if (rules_free) *rules_free = le16_to_cpu(resp->mirror_rules_free); } return status; } /** * i40e_aq_add_mirrorrule - add a mirror rule * @hw: pointer to the hw struct * @sw_seid: Switch SEID (to which rule refers) * @rule_type: Rule Type (ingress/egress/VLAN) * @dest_vsi: SEID of VSI to which packets will be mirrored * @count: length of the list * @mr_list: list of mirrored VSI SEIDs or VLAN IDs * @cmd_details: pointer to command details structure or NULL * @rule_id: Rule ID returned from FW * @rules_used: Number of rules used in internal switch * @rules_free: Number of rules free in internal switch * * Add mirror rule. 
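 *
 * Illustrative sketch: mirroring all ingress traffic of a VEB to one VSI
 * needs no mr_list, only the destination SEID ("veb_seid" and
 * "dst_vsi_seid" are caller-side values):
 *
 *    u16 rule_id, used, free;
 *    int ret;
 *
 *    ret = i40e_aq_add_mirrorrule(hw, veb_seid,
 *                                 I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
 *                                 dst_vsi_seid, 0, NULL, NULL,
 *                                 &rule_id, &used, &free);
 *
 * The returned rule_id is what i40e_aq_delete_mirrorrule() later takes.
 *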
Mirror rules are supported for VEBs or VEPA elements only **/ int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, struct i40e_asq_cmd_details *cmd_details, u16 *rule_id, u16 *rules_used, u16 *rules_free) { if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { if (count == 0 || !mr_list) return -EINVAL; } return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, rule_type, dest_vsi, count, mr_list, cmd_details, rule_id, rules_used, rules_free); } /** * i40e_aq_delete_mirrorrule - delete a mirror rule * @hw: pointer to the hw struct * @sw_seid: Switch SEID (to which rule refers) * @rule_type: Rule Type (ingress/egress/VLAN) * @count: length of the list * @rule_id: Rule ID that is returned in the receive desc as part of * add_mirrorrule. * @mr_list: list of mirrored VLAN IDs to be removed * @cmd_details: pointer to command details structure or NULL * @rules_used: Number of rules used in internal switch * @rules_free: Number of rules free in internal switch * * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only **/ int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, struct i40e_asq_cmd_details *cmd_details, u16 *rules_used, u16 *rules_free) { /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { /* count and mr_list shall be valid for rule_type INGRESS VLAN * mirroring. For other rule_type, count and rule_type should * not matter. */ if (count == 0 || !mr_list) return -EINVAL; } return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, rule_type, rule_id, count, mr_list, cmd_details, NULL, rules_used, rules_free); } /** * i40e_aq_send_msg_to_vf * @hw: pointer to the hardware structure * @vfid: VF id to send msg * @v_opcode: opcodes for VF-PF communication * @v_retval: return error code * @msg: pointer to the msg buffer * @msglen: msg length * @cmd_details: pointer to command details * * send msg to vf **/ int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_pf_vf_message *cmd = (struct i40e_aqc_pf_vf_message *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); cmd->id = cpu_to_le32(vfid); desc.cookie_high = cpu_to_le32(v_opcode); desc.cookie_low = cpu_to_le32(v_retval); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); if (msglen) { desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (msglen > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); desc.datalen = cpu_to_le16(msglen); } status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); return status; } /** * i40e_aq_debug_read_register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: register value * @cmd_details: pointer to command details structure or NULL * * Read the register using the admin queue commands **/ int i40e_aq_debug_read_register(struct i40e_hw *hw, u32 reg_addr, u64 *reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_reg_read_write *cmd_resp = (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; int status; if (reg_val == NULL) return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, 
i40e_aqc_opc_debug_read_reg); cmd_resp->address = cpu_to_le32(reg_addr); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | (u64)le32_to_cpu(cmd_resp->value_low); } return status; } /** * i40e_aq_debug_write_register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: register value * @cmd_details: pointer to command details structure or NULL * * Write to a register using the admin queue commands **/ int i40e_aq_debug_write_register(struct i40e_hw *hw, u32 reg_addr, u64 reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_reg_read_write *cmd = (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); cmd->address = cpu_to_le32(reg_addr); cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_request_resource * @hw: pointer to the hw struct * @resource: resource id * @access: access type * @sdp_number: resource number * @timeout: the maximum time in ms that the driver may hold the resource * @cmd_details: pointer to command details structure or NULL * * requests common resource using the admin queue commands **/ int i40e_aq_request_resource(struct i40e_hw *hw, enum i40e_aq_resources_ids resource, enum i40e_aq_resource_access_type access, u8 sdp_number, u64 *timeout, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_request_resource *cmd_resp = (struct i40e_aqc_request_resource *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); cmd_resp->resource_id = cpu_to_le16(resource); cmd_resp->access_type = cpu_to_le16(access); cmd_resp->resource_number = cpu_to_le32(sdp_number); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); /* The completion specifies the maximum time in ms that the driver * may hold the resource in the Timeout field. * If the resource is held by someone else, the command completes with * busy return value and the timeout field indicates the maximum time * the current owner of the resource has to free it. 
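 *
 * A typical acquire/use/release sequence built on this therefore looks
 * like the sketch below (resource id, access type and retry policy are
 * the caller's choice; I40E_NVM_RESOURCE_ID here names the NVM ownership
 * resource from the shared type definitions):
 *
 *    u64 timeout = 0;
 *
 *    ret = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
 *                                   I40E_RESOURCE_READ, 0, &timeout, NULL);
 *    (if the FW answered I40E_AQ_RC_EBUSY, wait up to "timeout" ms and
 *     retry before giving up)
 *    ... use the NVM ...
 *    i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);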
*/ if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) *timeout = le32_to_cpu(cmd_resp->timeout); return status; } /** * i40e_aq_release_resource * @hw: pointer to the hw struct * @resource: resource id * @sdp_number: resource number * @cmd_details: pointer to command details structure or NULL * * release common resource using the admin queue commands **/ int i40e_aq_release_resource(struct i40e_hw *hw, enum i40e_aq_resources_ids resource, u8 sdp_number, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_request_resource *cmd = (struct i40e_aqc_request_resource *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); cmd->resource_id = cpu_to_le16(resource); cmd->resource_number = cpu_to_le32(sdp_number); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_read_nvm * @hw: pointer to the hw struct * @module_pointer: module pointer location in words from the NVM beginning * @offset: byte offset from the module beginning * @length: length of the section to be read (in bytes from the offset) * @data: command buffer (size [bytes] = length) * @last_command: tells if this is the last command in a series * @cmd_details: pointer to command details structure or NULL * * Read the NVM using the admin queue commands **/ int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, bool last_command, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; int status; /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) { status = -EINVAL; goto i40e_aq_read_nvm_exit; } i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); /* If this is the last command in a series, set the proper flag. */ if (last_command) cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; cmd->module_pointer = module_pointer; cmd->offset = cpu_to_le32(offset); cmd->length = cpu_to_le16(length); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); if (length > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); i40e_aq_read_nvm_exit: return status; } /** * i40e_aq_erase_nvm * @hw: pointer to the hw struct * @module_pointer: module pointer location in words from the NVM beginning * @offset: offset in the module (expressed in 4 KB from module's beginning) * @length: length of the section to be erased (expressed in 4 KB) * @last_command: tells if this is the last command in a series * @cmd_details: pointer to command details structure or NULL * * Erase the NVM sector using the admin queue commands **/ int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, bool last_command, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; int status; /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) { status = -EINVAL; goto i40e_aq_erase_nvm_exit; } i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); /* If this is the last command in a series, set the proper flag. 
*/ if (last_command) cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; cmd->module_pointer = module_pointer; cmd->offset = cpu_to_le32(offset); cmd->length = cpu_to_le16(length); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); i40e_aq_erase_nvm_exit: return status; } /** * i40e_parse_discover_capabilities * @hw: pointer to the hw struct * @buff: pointer to a buffer containing device/function capability records * @cap_count: number of capability records in the list * @list_type_opc: type of capabilities list to parse * * Parse the device/function capabilities list. **/ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, u32 cap_count, enum i40e_admin_queue_opc list_type_opc) { struct i40e_aqc_list_capabilities_element_resp *cap; u32 valid_functions, num_functions; u32 number, logical_id, phys_id; struct i40e_hw_capabilities *p; u16 id, ocp_cfg_word0; u8 major_rev; int status; u32 i = 0; cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) p = &hw->dev_caps; else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) p = &hw->func_caps; else return; for (i = 0; i < cap_count; i++, cap++) { id = le16_to_cpu(cap->id); number = le32_to_cpu(cap->number); logical_id = le32_to_cpu(cap->logical_id); phys_id = le32_to_cpu(cap->phys_id); major_rev = cap->major_rev; switch (id) { case I40E_AQ_CAP_ID_SWITCH_MODE: p->switch_mode = number; break; case I40E_AQ_CAP_ID_MNG_MODE: p->management_mode = number; if (major_rev > 1) { p->mng_protocols_over_mctp = logical_id; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: Protocols over MCTP = %d\n", p->mng_protocols_over_mctp); } else { p->mng_protocols_over_mctp = 0; } break; case I40E_AQ_CAP_ID_NPAR_ACTIVE: p->npar_enable = number; break; case I40E_AQ_CAP_ID_OS2BMC_CAP: p->os2bmc = number; break; case I40E_AQ_CAP_ID_FUNCTIONS_VALID: p->valid_functions = number; break; case I40E_AQ_CAP_ID_SRIOV: if (number == 1) p->sr_iov_1_1 = true; break; case I40E_AQ_CAP_ID_VF: p->num_vfs = number; p->vf_base_id = logical_id; break; case I40E_AQ_CAP_ID_VMDQ: if (number == 1) p->vmdq = true; break; case I40E_AQ_CAP_ID_8021QBG: if (number == 1) p->evb_802_1_qbg = true; break; case I40E_AQ_CAP_ID_8021QBR: if (number == 1) p->evb_802_1_qbh = true; break; case I40E_AQ_CAP_ID_VSI: p->num_vsis = number; break; case I40E_AQ_CAP_ID_DCB: if (number == 1) { p->dcb = true; p->enabled_tcmap = logical_id; p->maxtc = phys_id; } break; case I40E_AQ_CAP_ID_FCOE: if (number == 1) p->fcoe = true; break; case I40E_AQ_CAP_ID_ISCSI: if (number == 1) p->iscsi = true; break; case I40E_AQ_CAP_ID_RSS: p->rss = true; p->rss_table_size = number; p->rss_table_entry_width = logical_id; break; case I40E_AQ_CAP_ID_RXQ: p->num_rx_qp = number; p->base_queue = phys_id; break; case I40E_AQ_CAP_ID_TXQ: p->num_tx_qp = number; p->base_queue = phys_id; break; case I40E_AQ_CAP_ID_MSIX: p->num_msix_vectors = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: MSIX vector count = %d\n", p->num_msix_vectors); break; case I40E_AQ_CAP_ID_VF_MSIX: p->num_msix_vectors_vf = number; break; case I40E_AQ_CAP_ID_FLEX10: if (major_rev == 1) { if (number == 1) { p->flex10_enable = true; p->flex10_capable = true; } } else { /* Capability revision >= 2 */ if (number & 1) p->flex10_enable = true; if (number & 2) p->flex10_capable = true; } p->flex10_mode = logical_id; p->flex10_status = phys_id; break; case I40E_AQ_CAP_ID_CEM: if (number == 1) p->mgmt_cem = true; break; case I40E_AQ_CAP_ID_IWARP: if (number == 1) p->iwarp = 
true; break; case I40E_AQ_CAP_ID_LED: if (phys_id < I40E_HW_CAP_MAX_GPIO) p->led[phys_id] = true; break; case I40E_AQ_CAP_ID_SDP: if (phys_id < I40E_HW_CAP_MAX_GPIO) p->sdp[phys_id] = true; break; case I40E_AQ_CAP_ID_MDIO: if (number == 1) { p->mdio_port_num = phys_id; p->mdio_port_mode = logical_id; } break; case I40E_AQ_CAP_ID_1588: if (number == 1) p->ieee_1588 = true; break; case I40E_AQ_CAP_ID_FLOW_DIRECTOR: p->fd = true; p->fd_filters_guaranteed = number; p->fd_filters_best_effort = logical_id; break; case I40E_AQ_CAP_ID_WSR_PROT: p->wr_csr_prot = (u64)number; p->wr_csr_prot |= (u64)logical_id << 32; break; case I40E_AQ_CAP_ID_NVM_MGMT: if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) p->sec_rev_disabled = true; if (number & I40E_NVM_MGMT_UPDATE_DISABLED) p->update_disabled = true; break; default: break; } } if (p->fcoe) i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); /* Software override ensuring FCoE is disabled if npar or mfp * mode because it is not supported in these modes. */ if (p->npar_enable || p->flex10_enable) p->fcoe = false; /* count the enabled ports (aka the "not disabled" ports) */ hw->num_ports = 0; for (i = 0; i < 4; i++) { u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); u64 port_cfg = 0; /* use AQ read to get the physical register offset instead * of the port relative offset */ i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) hw->num_ports++; } /* OCP cards case: if a mezz is removed the Ethernet port is at * disabled state in PRTGEN_CNF register. Additional NVM read is * needed in order to check if we are dealing with OCP card. * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting * physical ports results in wrong partition id calculation and thus * not supporting WoL. 
*/ if (hw->mac.type == I40E_MAC_X722) { if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) { status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 2 * I40E_SR_OCP_CFG_WORD0, sizeof(ocp_cfg_word0), &ocp_cfg_word0, true, NULL); if (!status && (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) hw->num_ports = 4; i40e_release_nvm(hw); } } valid_functions = p->valid_functions; num_functions = 0; while (valid_functions) { if (valid_functions & 1) num_functions++; valid_functions >>= 1; } /* partition id is 1-based, and functions are evenly spread * across the ports as partitions */ if (hw->num_ports != 0) { hw->partition_id = (hw->pf_id / hw->num_ports) + 1; hw->num_partitions = num_functions / hw->num_ports; } /* additional HW specific goodies that might * someday be HW version specific */ p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; } /** * i40e_aq_discover_capabilities * @hw: pointer to the hw struct * @buff: a virtual buffer to hold the capabilities * @buff_size: Size of the virtual buffer * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM * @list_type_opc: capabilities type to discover - pass in the command opcode * @cmd_details: pointer to command details structure or NULL * * Get the device capabilities descriptions from the firmware **/ int i40e_aq_discover_capabilities(struct i40e_hw *hw, void *buff, u16 buff_size, u16 *data_size, enum i40e_admin_queue_opc list_type_opc, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aqc_list_capabilites *cmd; struct i40e_aq_desc desc; int status = 0; cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; if (list_type_opc != i40e_aqc_opc_list_func_capabilities && list_type_opc != i40e_aqc_opc_list_dev_capabilities) { status = -EINVAL; goto exit; } i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); *data_size = le16_to_cpu(desc.datalen); if (status) goto exit; i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), list_type_opc); exit: return status; } /** * i40e_aq_update_nvm * @hw: pointer to the hw struct * @module_pointer: module pointer location in words from the NVM beginning * @offset: byte offset from the module beginning * @length: length of the section to be written (in bytes from the offset) * @data: command buffer (size [bytes] = length) * @last_command: tells if this is the last command in a series * @preservation_flags: Preservation mode flags * @cmd_details: pointer to command details structure or NULL * * Update the NVM using the admin queue commands **/ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, bool last_command, u8 preservation_flags, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; int status; /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) { status = -EINVAL; goto i40e_aq_update_nvm_exit; } i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); /* If this is the last command in a series, set the proper flag. 
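 * A caller splitting one logical update across several AQ commands only
 * sets it on the final chunk, roughly as in this sketch (chunk sizing is
 * caller policy and the NVM resource must already be held for WRITE):
 *
 *    for (off = 0; off < len; off += chunk) {
 *            bool last = (off + chunk >= len);
 *
 *            ret = i40e_aq_update_nvm(hw, module, off,
 *                                     min(chunk, len - off), buf + off,
 *                                     last, 0, NULL);
 *            if (ret)
 *                    break;
 *    }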
*/ if (last_command) cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; if (hw->mac.type == I40E_MAC_X722) { if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) cmd->command_flags |= (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) cmd->command_flags |= (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); } cmd->module_pointer = module_pointer; cmd->offset = cpu_to_le32(offset); cmd->length = cpu_to_le16(length); desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (length > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); i40e_aq_update_nvm_exit: return status; } /** * i40e_aq_rearrange_nvm * @hw: pointer to the hw struct * @rearrange_nvm: defines direction of rearrangement * @cmd_details: pointer to command details structure or NULL * * Rearrange NVM structure, available only for transition FW **/ int i40e_aq_rearrange_nvm(struct i40e_hw *hw, u8 rearrange_nvm, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aqc_nvm_update *cmd; struct i40e_aq_desc desc; int status; cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | I40E_AQ_NVM_REARRANGE_TO_STRUCT); if (!rearrange_nvm) { status = -EINVAL; goto i40e_aq_rearrange_nvm_exit; } cmd->command_flags |= rearrange_nvm; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); i40e_aq_rearrange_nvm_exit: return status; } /** * i40e_aq_get_lldp_mib * @hw: pointer to the hw struct * @bridge_type: type of bridge requested * @mib_type: Local, Remote or both Local and Remote MIBs * @buff: pointer to a user supplied buffer to store the MIB block * @buff_size: size of the buffer (in bytes) * @local_len : length of the returned Local LLDP MIB * @remote_len: length of the returned Remote LLDP MIB * @cmd_details: pointer to command details structure or NULL * * Requests the complete LLDP MIB (entire packet). 
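 *
 * Illustrative sketch of pulling the local MIB (the buffer size is an
 * arbitrary example; the bridge/MIB type constants come from the admin
 * queue definitions):
 *
 *    u8 mib[1024];
 *    u16 local_len = 0, remote_len = 0;
 *    int ret;
 *
 *    ret = i40e_aq_get_lldp_mib(hw,
 *                               I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
 *                               I40E_AQ_LLDP_MIB_LOCAL, mib, sizeof(mib),
 *                               &local_len, &remote_len, NULL);
 *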
**/ int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, u8 mib_type, void *buff, u16 buff_size, u16 *local_len, u16 *remote_len, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_get_mib *cmd = (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; struct i40e_aqc_lldp_get_mib *resp = (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; int status; if (buff_size == 0 || !buff) return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); /* Indirect Command */ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); desc.datalen = cpu_to_le16(buff_size); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { if (local_len != NULL) *local_len = le16_to_cpu(resp->local_len); if (remote_len != NULL) *remote_len = le16_to_cpu(resp->remote_len); } return status; } /** * i40e_aq_set_lldp_mib - Set the LLDP MIB * @hw: pointer to the hw struct * @mib_type: Local, Remote or both Local and Remote MIBs * @buff: pointer to a user supplied buffer to store the MIB block * @buff_size: size of the buffer (in bytes) * @cmd_details: pointer to command details structure or NULL * * Set the LLDP MIB. **/ int i40e_aq_set_lldp_mib(struct i40e_hw *hw, u8 mib_type, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aqc_lldp_set_local_mib *cmd; struct i40e_aq_desc desc; int status; cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw; if (buff_size == 0 || !buff) return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_set_local_mib); /* Indirect Command */ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); desc.datalen = cpu_to_le16(buff_size); cmd->type = mib_type; cmd->length = cpu_to_le16(buff_size); cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff)); cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff)); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); return status; } /** * i40e_aq_cfg_lldp_mib_change_event * @hw: pointer to the hw struct * @enable_update: Enable or Disable event posting * @cmd_details: pointer to command details structure or NULL * * Enable or Disable posting of an event on ARQ when LLDP MIB * associated with the interface changes **/ int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, bool enable_update, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_update_mib *cmd = (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); if (!enable_update) cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_restore_lldp * @hw: pointer to the hw struct * @setting: pointer to factory setting variable or NULL * @restore: True if factory settings should be restored * @cmd_details: pointer to command details structure or NULL * * Restore LLDP Agent factory settings if @restore set to True. In other case * only returns factory setting in AQ response. 
**/ int i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_restore *cmd = (struct i40e_aqc_lldp_restore *)&desc.params.raw; int status; if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) { i40e_debug(hw, I40E_DEBUG_ALL, "Restore LLDP not supported by current FW version.\n"); return -ENODEV; } i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); if (restore) cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (setting) *setting = cmd->command & 1; return status; } /** * i40e_aq_stop_lldp * @hw: pointer to the hw struct * @shutdown_agent: True if LLDP Agent needs to be Shutdown * @persist: True if stop of LLDP should be persistent across power cycles * @cmd_details: pointer to command details structure or NULL * * Stop or Shutdown the embedded LLDP Agent **/ int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, bool persist, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_stop *cmd = (struct i40e_aqc_lldp_stop *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); if (shutdown_agent) cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; if (persist) { if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST; else i40e_debug(hw, I40E_DEBUG_ALL, "Persistent Stop LLDP not supported by current FW version.\n"); } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_start_lldp * @hw: pointer to the hw struct * @persist: True if start of LLDP should be persistent across power cycles * @cmd_details: pointer to command details structure or NULL * * Start the embedded LLDP Agent on all ports. 
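 *
 * Together with i40e_aq_stop_lldp() this gives the usual FW-LLDP toggle,
 * e.g. (sketch only; persistence only sticks when the FW advertises
 * I40E_HW_FLAG_FW_LLDP_PERSISTENT):
 *
 *    i40e_aq_stop_lldp(hw, true, true, NULL);
 *    (apply the host-driven DCB/LLDP configuration)
 *    i40e_aq_start_lldp(hw, true, NULL);
 *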
**/ int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_start *cmd = (struct i40e_aqc_lldp_start *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); cmd->command = I40E_AQ_LLDP_AGENT_START; if (persist) { if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; else i40e_debug(hw, I40E_DEBUG_ALL, "Persistent Start LLDP not supported by current FW version.\n"); } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_set_dcb_parameters * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * @dcb_enable: True if DCB configuration needs to be applied * **/ int i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_dcb_parameters *cmd = (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; int status; if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) return -ENODEV; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_dcb_parameters); if (dcb_enable) { cmd->valid_flags = I40E_DCB_VALID; cmd->command = I40E_AQ_DCB_SET_AGENT; } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_cee_dcb_config * @hw: pointer to the hw struct * @buff: response buffer that stores CEE operational configuration * @buff_size: size of the buffer passed * @cmd_details: pointer to command details structure or NULL * * Get CEE DCBX mode operational configuration from firmware **/ int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; int status; if (buff_size == 0 || !buff) return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, cmd_details); return status; } /** * i40e_aq_add_udp_tunnel * @hw: pointer to the hw struct * @udp_port: the UDP port to add in Host byte order * @protocol_index: protocol index type * @filter_index: pointer to filter index * @cmd_details: pointer to command details structure or NULL * * Note: Firmware expects the udp_port value to be in Little Endian format, * and this function will call cpu_to_le16 to convert from Host byte order to * Little Endian order. 
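 *
 * Illustrative sketch (4789 is the IANA VXLAN port, passed in host order
 * as described above; the tunnel type constant comes from the admin queue
 * definitions):
 *
 *    u8 filter_index;
 *    int ret;
 *
 *    ret = i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
 *                                 &filter_index, NULL);
 *
 * The saved filter_index is what i40e_aq_del_udp_tunnel() needs to remove
 * the port again.
 *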
**/ int i40e_aq_add_udp_tunnel(struct i40e_hw *hw, u16 udp_port, u8 protocol_index, u8 *filter_index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_udp_tunnel *cmd = (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; struct i40e_aqc_del_udp_tunnel_completion *resp = (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); cmd->udp_port = cpu_to_le16(udp_port); cmd->protocol_type = protocol_index; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && filter_index) *filter_index = resp->index; return status; } /** * i40e_aq_del_udp_tunnel * @hw: pointer to the hw struct * @index: filter index * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_remove_udp_tunnel *cmd = (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); cmd->index = index; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_delete_element - Delete switch element * @hw: pointer to the hw struct * @seid: the SEID to delete from the switch * @cmd_details: pointer to command details structure or NULL * * This deletes a switch element from the switch. **/ int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_switch_seid *cmd = (struct i40e_aqc_switch_seid *)&desc.params.raw; int status; if (seid == 0) return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, cmd_details, true); return status; } /** * i40e_aq_dcb_updated - DCB Updated Command * @hw: pointer to the hw struct * @cmd_details: pointer to command details structure or NULL * * EMP will return when the shared RPB settings have been * recomputed and modified. The retval field in the descriptor * will be set to 0 when RPB is modified. 
**/ int i40e_aq_dcb_updated(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler * @hw: pointer to the hw struct * @seid: seid for the physical port/switching component/vsi * @buff: Indirect buffer to hold data parameters and response * @buff_size: Indirect buffer size * @opcode: Tx scheduler AQ command opcode * @cmd_details: pointer to command details structure or NULL * * Generic command handler for Tx scheduler AQ commands **/ static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, void *buff, u16 buff_size, enum i40e_admin_queue_opc opcode, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_tx_sched_ind *cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; int status; bool cmd_param_flag = false; switch (opcode) { case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: case i40e_aqc_opc_configure_vsi_tc_bw: case i40e_aqc_opc_enable_switching_comp_ets: case i40e_aqc_opc_modify_switching_comp_ets: case i40e_aqc_opc_disable_switching_comp_ets: case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: case i40e_aqc_opc_configure_switching_comp_bw_config: cmd_param_flag = true; break; case i40e_aqc_opc_query_vsi_bw_config: case i40e_aqc_opc_query_vsi_ets_sla_config: case i40e_aqc_opc_query_switching_comp_ets_config: case i40e_aqc_opc_query_port_ets_config: case i40e_aqc_opc_query_switching_comp_bw_config: cmd_param_flag = false; break; default: return -EINVAL; } i40e_fill_default_direct_cmd_desc(&desc, opcode); /* Indirect command */ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); if (cmd_param_flag) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); desc.datalen = cpu_to_le16(buff_size); cmd->vsi_seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); return status; } /** * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit * @hw: pointer to the hw struct * @seid: VSI seid * @credit: BW limit credits (0 = disabled) * @max_credit: Max BW limit credits * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, u16 seid, u16 credit, u8 max_credit, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_configure_vsi_bw_limit *cmd = (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_configure_vsi_bw_limit); cmd->vsi_seid = cpu_to_le16(seid); cmd->credit = cpu_to_le16(credit); cmd->max_credit = max_credit; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC * @hw: pointer to the hw struct * @seid: VSI seid * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_configure_vsi_tc_bw, cmd_details); } /** * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port * @hw: pointer to the hw struct 
* @seid: seid of the switching component connected to Physical Port * @ets_data: Buffer holding ETS parameters * @opcode: Tx scheduler AQ command opcode * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_switching_comp_ets_data *ets_data, enum i40e_admin_queue_opc opcode, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, sizeof(*ets_data), opcode, cmd_details); } /** * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC * @hw: pointer to the hw struct * @seid: seid of the switching component * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_configure_switching_comp_bw_config, cmd_details); } /** * i40e_aq_query_vsi_bw_config - Query VSI BW configuration * @hw: pointer to the hw struct * @seid: seid of the VSI * @bw_data: Buffer to hold VSI BW configuration * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_vsi_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_vsi_bw_config, cmd_details); } /** * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC * @hw: pointer to the hw struct * @seid: seid of the VSI * @bw_data: Buffer to hold VSI BW configuration per TC * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_vsi_ets_sla_config, cmd_details); } /** * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC * @hw: pointer to the hw struct * @seid: seid of the switching component * @bw_data: Buffer to hold switching component's per TC BW config * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_switching_comp_ets_config, cmd_details); } /** * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration * @hw: pointer to the hw struct * @seid: seid of the VSI or switching component connected to Physical Port * @bw_data: Buffer to hold current ETS configuration for the Physical Port * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_query_port_ets_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_port_ets_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_port_ets_config, cmd_details); } /** * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration * @hw: pointer to the hw struct * @seid: seid of the switching component 
* @bw_data: Buffer to hold switching component's BW configuration * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_switching_comp_bw_config, cmd_details); } /** * i40e_validate_filter_settings * @hw: pointer to the hardware structure * @settings: Filter control settings * * Check and validate the filter control settings passed. * The function checks for the valid filter/context sizes being * passed for FCoE and PE. * * Returns 0 if the values passed are valid and within * range else returns an error. **/ static int i40e_validate_filter_settings(struct i40e_hw *hw, struct i40e_filter_control_settings *settings) { u32 fcoe_cntx_size, fcoe_filt_size; u32 fcoe_fmax; u32 val; /* Validate FCoE settings passed */ switch (settings->fcoe_filt_num) { case I40E_HASH_FILTER_SIZE_1K: case I40E_HASH_FILTER_SIZE_2K: case I40E_HASH_FILTER_SIZE_4K: case I40E_HASH_FILTER_SIZE_8K: case I40E_HASH_FILTER_SIZE_16K: case I40E_HASH_FILTER_SIZE_32K: fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; fcoe_filt_size <<= (u32)settings->fcoe_filt_num; break; default: return -EINVAL; } switch (settings->fcoe_cntx_num) { case I40E_DMA_CNTX_SIZE_512: case I40E_DMA_CNTX_SIZE_1K: case I40E_DMA_CNTX_SIZE_2K: case I40E_DMA_CNTX_SIZE_4K: fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; break; default: return -EINVAL; } /* Validate PE settings passed */ switch (settings->pe_filt_num) { case I40E_HASH_FILTER_SIZE_1K: case I40E_HASH_FILTER_SIZE_2K: case I40E_HASH_FILTER_SIZE_4K: case I40E_HASH_FILTER_SIZE_8K: case I40E_HASH_FILTER_SIZE_16K: case I40E_HASH_FILTER_SIZE_32K: case I40E_HASH_FILTER_SIZE_64K: case I40E_HASH_FILTER_SIZE_128K: case I40E_HASH_FILTER_SIZE_256K: case I40E_HASH_FILTER_SIZE_512K: case I40E_HASH_FILTER_SIZE_1M: break; default: return -EINVAL; } switch (settings->pe_cntx_num) { case I40E_DMA_CNTX_SIZE_512: case I40E_DMA_CNTX_SIZE_1K: case I40E_DMA_CNTX_SIZE_2K: case I40E_DMA_CNTX_SIZE_4K: case I40E_DMA_CNTX_SIZE_8K: case I40E_DMA_CNTX_SIZE_16K: case I40E_DMA_CNTX_SIZE_32K: case I40E_DMA_CNTX_SIZE_64K: case I40E_DMA_CNTX_SIZE_128K: case I40E_DMA_CNTX_SIZE_256K: break; default: return -EINVAL; } /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ val = rd32(hw, I40E_GLHMC_FCOEFMAX); fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) return -EINVAL; return 0; } /** * i40e_set_filter_control * @hw: pointer to the hardware structure * @settings: Filter control settings * * Set the Queue Filters for PE/FCoE and enable filters required * for a single PF. It is expected that these settings are programmed * at the driver initialization time. 
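 *
 * Illustrative init-time sketch (the values are examples, not
 * recommendations; a zero-initialized structure is assumed to map to the
 * smallest PE/FCoE table encodings accepted by the validation above):
 *
 *    struct i40e_filter_control_settings settings = {};
 *    int ret;
 *
 *    settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 *    settings.enable_ethtype = true;
 *    settings.enable_macvlan = true;
 *    settings.enable_fdir = true;
 *    ret = i40e_set_filter_control(hw, &settings);
 *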
**/ int i40e_set_filter_control(struct i40e_hw *hw, struct i40e_filter_control_settings *settings) { u32 hash_lut_size = 0; int ret = 0; u32 val; if (!settings) return -EINVAL; /* Validate the input settings */ ret = i40e_validate_filter_settings(hw, settings); if (ret) return ret; /* Read the PF Queue Filter control register */ val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); /* Program required PE hash buckets for the PF */ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & I40E_PFQF_CTL_0_PEHSIZE_MASK; /* Program required PE contexts for the PF */ val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & I40E_PFQF_CTL_0_PEDSIZE_MASK; /* Program required FCoE hash buckets for the PF */ val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; val |= ((u32)settings->fcoe_filt_num << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & I40E_PFQF_CTL_0_PFFCHSIZE_MASK; /* Program required FCoE DDP contexts for the PF */ val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; val |= ((u32)settings->fcoe_cntx_num << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & I40E_PFQF_CTL_0_PFFCDSIZE_MASK; /* Program Hash LUT size for the PF */ val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) hash_lut_size = 1; val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ if (settings->enable_fdir) val |= I40E_PFQF_CTL_0_FD_ENA_MASK; if (settings->enable_ethtype) val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; if (settings->enable_macvlan) val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); return 0; } /** * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter * @hw: pointer to the hw struct * @mac_addr: MAC address to use in the filter * @ethtype: Ethertype to use in the filter * @flags: Flags that needs to be applied to the filter * @vsi_seid: seid of the control VSI * @queue: VSI queue number to send the packet to * @is_add: Add control packet filter if True else remove * @stats: Structure to hold information on control filter counts * @cmd_details: pointer to command details structure or NULL * * This command will Add or Remove control packet filter for a control VSI. * In return it will update the total number of perfect filter count in * the stats member. 
**/ int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, u8 *mac_addr, u16 ethtype, u16 flags, u16 vsi_seid, u16 queue, bool is_add, struct i40e_control_filter_stats *stats, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_control_packet_filter *cmd = (struct i40e_aqc_add_remove_control_packet_filter *) &desc.params.raw; struct i40e_aqc_add_remove_control_packet_filter_completion *resp = (struct i40e_aqc_add_remove_control_packet_filter_completion *) &desc.params.raw; int status; if (vsi_seid == 0) return -EINVAL; if (is_add) { i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_control_packet_filter); cmd->queue = cpu_to_le16(queue); } else { i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_control_packet_filter); } if (mac_addr) ether_addr_copy(cmd->mac, mac_addr); cmd->etype = cpu_to_le16(ethtype); cmd->flags = cpu_to_le16(flags); cmd->seid = cpu_to_le16(vsi_seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status && stats) { stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); stats->etype_used = le16_to_cpu(resp->etype_used); stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); stats->etype_free = le16_to_cpu(resp->etype_free); } return status; } /** * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control * @hw: pointer to the hw struct * @seid: VSI seid to add ethertype filter from **/ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 seid) { #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; int status; status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, seid, 0, true, NULL, NULL); if (status) hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); } /** * i40e_aq_alternate_read * @hw: pointer to the hardware structure * @reg_addr0: address of first dword to be read * @reg_val0: pointer for data read from 'reg_addr0' * @reg_addr1: address of second dword to be read * @reg_val1: pointer for data read from 'reg_addr1' * * Read one or two dwords from alternate structure. Fields are indicated * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer * is not passed then only register at 'reg_addr0' is read. 
* **/ static int i40e_aq_alternate_read(struct i40e_hw *hw, u32 reg_addr0, u32 *reg_val0, u32 reg_addr1, u32 *reg_val1) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_write *cmd_resp = (struct i40e_aqc_alternate_write *)&desc.params.raw; int status; if (!reg_val0) return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); cmd_resp->address0 = cpu_to_le32(reg_addr0); cmd_resp->address1 = cpu_to_le32(reg_addr1); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); if (!status) { *reg_val0 = le32_to_cpu(cmd_resp->data0); if (reg_val1) *reg_val1 = le32_to_cpu(cmd_resp->data1); } return status; } /** * i40e_aq_suspend_port_tx * @hw: pointer to the hardware structure * @seid: port seid * @cmd_details: pointer to command details structure or NULL * * Suspend port's Tx traffic **/ int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aqc_tx_sched_ind *cmd; struct i40e_aq_desc desc; int status; cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx); cmd->vsi_seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_resume_port_tx * @hw: pointer to the hardware structure * @cmd_details: pointer to command details structure or NULL * * Resume port's Tx traffic **/ int i40e_aq_resume_port_tx(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_set_pci_config_data - store PCI bus info * @hw: pointer to hardware structure * @link_status: the link status word from PCI config space * * Stores the PCI bus info (speed, width, type) within the i40e_hw structure **/ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) { hw->bus.type = i40e_bus_type_pci_express; switch (link_status & PCI_EXP_LNKSTA_NLW) { case PCI_EXP_LNKSTA_NLW_X1: hw->bus.width = i40e_bus_width_pcie_x1; break; case PCI_EXP_LNKSTA_NLW_X2: hw->bus.width = i40e_bus_width_pcie_x2; break; case PCI_EXP_LNKSTA_NLW_X4: hw->bus.width = i40e_bus_width_pcie_x4; break; case PCI_EXP_LNKSTA_NLW_X8: hw->bus.width = i40e_bus_width_pcie_x8; break; default: hw->bus.width = i40e_bus_width_unknown; break; } switch (link_status & PCI_EXP_LNKSTA_CLS) { case PCI_EXP_LNKSTA_CLS_2_5GB: hw->bus.speed = i40e_bus_speed_2500; break; case PCI_EXP_LNKSTA_CLS_5_0GB: hw->bus.speed = i40e_bus_speed_5000; break; case PCI_EXP_LNKSTA_CLS_8_0GB: hw->bus.speed = i40e_bus_speed_8000; break; default: hw->bus.speed = i40e_bus_speed_unknown; break; } } /** * i40e_aq_debug_dump * @hw: pointer to the hardware structure * @cluster_id: specific cluster to dump * @table_id: table id within cluster * @start_index: index of line in the block to read * @buff_size: dump buffer size * @buff: dump buffer * @ret_buff_size: actual buffer size returned * @ret_next_table: next block to read * @ret_next_index: next index to read * @cmd_details: pointer to command details structure or NULL * * Dump internal FW/HW data for debug purposes. 
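 *
 * Illustrative sketch of pulling one block (cluster/table ids and the
 * buffer are whatever the debug tool asks for):
 *
 *    u8 next_table = 0;
 *    u32 next_index = 0;
 *    u16 ret_len = 0;
 *    int ret;
 *
 *    ret = i40e_aq_debug_dump(hw, cluster_id, table_id, 0, sizeof(buf),
 *                             buf, &ret_len, &next_table, &next_index,
 *                             NULL);
 *
 * A dump tool can then resume from (next_table, next_index) to fetch the
 * following block until it has what it needs.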
* **/ int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, u8 table_id, u32 start_index, u16 buff_size, void *buff, u16 *ret_buff_size, u8 *ret_next_table, u32 *ret_next_index, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_dump_internals *cmd = (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; struct i40e_aqc_debug_dump_internals *resp = (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; int status; if (buff_size == 0 || !buff) return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_dump_internals); /* Indirect Command */ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); cmd->cluster_id = cluster_id; cmd->table_id = table_id; cmd->idx = cpu_to_le32(start_index); desc.datalen = cpu_to_le16(buff_size); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { if (ret_buff_size) *ret_buff_size = le16_to_cpu(desc.datalen); if (ret_next_table) *ret_next_table = resp->table_id; if (ret_next_index) *ret_next_index = le32_to_cpu(resp->idx); } return status; } /** * i40e_read_bw_from_alt_ram * @hw: pointer to the hardware structure * @max_bw: pointer for max_bw read * @min_bw: pointer for min_bw read * @min_valid: pointer for bool that is true if min_bw is a valid value * @max_valid: pointer for bool that is true if max_bw is a valid value * * Read bw from the alternate ram for the given pf **/ int i40e_read_bw_from_alt_ram(struct i40e_hw *hw, u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid) { u32 max_bw_addr, min_bw_addr; int status; /* Calculate the address of the min/max bw registers */ max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + I40E_ALT_STRUCT_MAX_BW_OFFSET + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + I40E_ALT_STRUCT_MIN_BW_OFFSET + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); /* Read the bandwidths from alt ram */ status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, min_bw_addr, min_bw); if (*min_bw & I40E_ALT_BW_VALID_MASK) *min_valid = true; else *min_valid = false; if (*max_bw & I40E_ALT_BW_VALID_MASK) *max_valid = true; else *max_valid = false; return status; } /** * i40e_aq_configure_partition_bw * @hw: pointer to the hardware structure * @bw_data: Buffer holding valid pfs and bw limits * @cmd_details: pointer to command details * * Configure partitions guaranteed/max bw **/ int i40e_aq_configure_partition_bw(struct i40e_hw *hw, struct i40e_aqc_configure_partition_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details) { u16 bwd_size = sizeof(*bw_data); struct i40e_aq_desc desc; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_configure_partition_bw); /* Indirect command */ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); if (bwd_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); desc.datalen = cpu_to_le16(bwd_size); status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details); return status; } /** * i40e_read_phy_register_clause22 * @hw: pointer to the HW structure * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads specified PHY register value **/ int i40e_read_phy_register_clause22(struct i40e_hw *hw, u16 reg, u8 phy_addr, u16 *value) { u8 port_num = (u8)hw->func_caps.mdio_port_num; int status = -EIO; u32 command = 0; u16 retry = 1000; command = 
(reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | (I40E_MDIO_CLAUSE22_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK); wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = 0; break; } udelay(10); retry--; } while (retry); if (status) { i40e_debug(hw, I40E_DEBUG_PHY, "PHY: Can't write command to external PHY.\n"); } else { command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; } return status; } /** * i40e_write_phy_register_clause22 * @hw: pointer to the HW structure * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes specified PHY register value **/ int i40e_write_phy_register_clause22(struct i40e_hw *hw, u16 reg, u8 phy_addr, u16 value) { u8 port_num = (u8)hw->func_caps.mdio_port_num; int status = -EIO; u32 command = 0; u16 retry = 1000; command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; wr32(hw, I40E_GLGEN_MSRWD(port_num), command); command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | (I40E_MDIO_CLAUSE22_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK); wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = 0; break; } udelay(10); retry--; } while (retry); return status; } /** * i40e_read_phy_register_clause45 * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads specified PHY register value **/ int i40e_read_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value) { u8 port_num = hw->func_caps.mdio_port_num; int status = -EIO; u32 command = 0; u16 retry = 1000; command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | (I40E_MDIO_CLAUSE45_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK) | (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = 0; break; } usleep_range(10, 20); retry--; } while (retry); if (status) { i40e_debug(hw, I40E_DEBUG_PHY, "PHY: Can't write command to external PHY.\n"); goto phy_read_end; } command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | (I40E_MDIO_CLAUSE45_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK) | (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); status = -EIO; retry = 1000; wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = 0; break; } usleep_range(10, 20); retry--; } while (retry); if (!status) { command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; } else { i40e_debug(hw, I40E_DEBUG_PHY, "PHY: Can't read register value from external PHY.\n"); } phy_read_end: return status; } /** * i40e_write_phy_register_clause45 * @hw: pointer to the HW structure * @page: registers page number * @reg: register 
address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes value to specified PHY register **/ int i40e_write_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 value) { u8 port_num = hw->func_caps.mdio_port_num; int status = -EIO; u16 retry = 1000; u32 command = 0; command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | (I40E_MDIO_CLAUSE45_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK) | (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = 0; break; } usleep_range(10, 20); retry--; } while (retry); if (status) { i40e_debug(hw, I40E_DEBUG_PHY, "PHY: Can't write command to external PHY.\n"); goto phy_write_end; } command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; wr32(hw, I40E_GLGEN_MSRWD(port_num), command); command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | (I40E_MDIO_CLAUSE45_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK) | (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); status = -EIO; retry = 1000; wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { command = rd32(hw, I40E_GLGEN_MSCA(port_num)); if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { status = 0; break; } usleep_range(10, 20); retry--; } while (retry); phy_write_end: return status; } /** * i40e_write_phy_register * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes value to specified PHY register **/ int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 value) { int status; switch (hw->device_id) { case I40E_DEV_ID_1G_BASE_T_X722: status = i40e_write_phy_register_clause22(hw, reg, phy_addr, value); break; case I40E_DEV_ID_1G_BASE_T_BC: case I40E_DEV_ID_5G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_10G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_25G_B: case I40E_DEV_ID_25G_SFP28: status = i40e_write_phy_register_clause45(hw, page, reg, phy_addr, value); break; default: status = -EIO; break; } return status; } /** * i40e_read_phy_register * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads specified PHY register value **/ int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value) { int status; switch (hw->device_id) { case I40E_DEV_ID_1G_BASE_T_X722: status = i40e_read_phy_register_clause22(hw, reg, phy_addr, value); break; case I40E_DEV_ID_1G_BASE_T_BC: case I40E_DEV_ID_5G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_10G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_25G_B: case I40E_DEV_ID_25G_SFP28: status = i40e_read_phy_register_clause45(hw, page, reg, phy_addr, value); break; default: status = -EIO; break; } return status; } /** * i40e_get_phy_address * @hw: pointer to the HW structure * @dev_num: PHY port num that address we want * * Gets PHY address for current port **/ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) { u8 port_num = hw->func_caps.mdio_port_num; u32 reg_val = rd32(hw, 
I40E_GLGEN_MDIO_I2C_SEL(port_num)); return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; } /** * i40e_blink_phy_link_led * @hw: pointer to the HW structure * @time: time how long led will blinks in secs * @interval: gap between LED on and off in msecs * * Blinks PHY link LED **/ int i40e_blink_phy_link_led(struct i40e_hw *hw, u32 time, u32 interval) { u16 led_addr = I40E_PHY_LED_PROV_REG_1; u16 gpio_led_port; u8 phy_addr = 0; int status = 0; u16 led_ctl; u8 port_num; u16 led_reg; u32 i; i = rd32(hw, I40E_PFGEN_PORTNUM); port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); phy_addr = i40e_get_phy_address(hw, port_num); for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, led_addr++) { status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, &led_reg); if (status) goto phy_blinking_end; led_ctl = led_reg; if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { led_reg = 0; status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, led_reg); if (status) goto phy_blinking_end; break; } } if (time > 0 && interval > 0) { for (i = 0; i < time * 1000; i += interval) { status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, &led_reg); if (status) goto restore_config; if (led_reg & I40E_PHY_LED_MANUAL_ON) led_reg = 0; else led_reg = I40E_PHY_LED_MANUAL_ON; status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, led_reg); if (status) goto restore_config; msleep(interval); } } restore_config: status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, led_ctl); phy_blinking_end: return status; } /** * i40e_led_get_reg - read LED register * @hw: pointer to the HW structure * @led_addr: LED register address * @reg_val: read register value **/ static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, u32 *reg_val) { u8 phy_addr = 0; u8 port_num; int status; u32 i; *reg_val = 0; if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL, I40E_PHY_COM_REG_PAGE, true, I40E_PHY_LED_PROV_REG_1, reg_val, NULL); } else { i = rd32(hw, I40E_PFGEN_PORTNUM); port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); phy_addr = i40e_get_phy_address(hw, port_num); status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, (u16 *)reg_val); } return status; } /** * i40e_led_set_reg - write LED register * @hw: pointer to the HW structure * @led_addr: LED register address * @reg_val: register value to write **/ static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, u32 reg_val) { u8 phy_addr = 0; u8 port_num; int status; u32 i; if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { status = i40e_aq_set_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL, I40E_PHY_COM_REG_PAGE, true, I40E_PHY_LED_PROV_REG_1, reg_val, NULL); } else { i = rd32(hw, I40E_PFGEN_PORTNUM); port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); phy_addr = i40e_get_phy_address(hw, port_num); status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, led_addr, phy_addr, (u16)reg_val); } return status; } /** * i40e_led_get_phy - return current on/off mode * @hw: pointer to the hw struct * @led_addr: address of led register to use * @val: original value of register to use * **/ int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, u16 *val) { u16 gpio_led_port; u8 phy_addr = 0; u32 reg_val_aq; int status = 0; u16 temp_addr; u16 reg_val; u8 port_num; u32 i; if (hw->flags & 
I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL, I40E_PHY_COM_REG_PAGE, true, I40E_PHY_LED_PROV_REG_1, &reg_val_aq, NULL); if (status == 0) *val = (u16)reg_val_aq; return status; } temp_addr = I40E_PHY_LED_PROV_REG_1; i = rd32(hw, I40E_PFGEN_PORTNUM); port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); phy_addr = i40e_get_phy_address(hw, port_num); for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, temp_addr++) { status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, temp_addr, phy_addr, &reg_val); if (status) return status; *val = reg_val; if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { *led_addr = temp_addr; break; } } return status; } /** * i40e_led_set_phy * @hw: pointer to the HW structure * @on: true or false * @led_addr: address of led register to use * @mode: original val plus bit for set or ignore * * Set led's on or off when controlled by the PHY * **/ int i40e_led_set_phy(struct i40e_hw *hw, bool on, u16 led_addr, u32 mode) { u32 led_ctl = 0; u32 led_reg = 0; int status = 0; status = i40e_led_get_reg(hw, led_addr, &led_reg); if (status) return status; led_ctl = led_reg; if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { led_reg = 0; status = i40e_led_set_reg(hw, led_addr, led_reg); if (status) return status; } status = i40e_led_get_reg(hw, led_addr, &led_reg); if (status) goto restore_config; if (on) led_reg = I40E_PHY_LED_MANUAL_ON; else led_reg = 0; status = i40e_led_set_reg(hw, led_addr, led_reg); if (status) goto restore_config; if (mode & I40E_PHY_LED_MODE_ORIG) { led_ctl = (mode & I40E_PHY_LED_MODE_MASK); status = i40e_led_set_reg(hw, led_addr, led_ctl); } return status; restore_config: status = i40e_led_set_reg(hw, led_addr, led_ctl); return status; } /** * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: ptr to register value * @cmd_details: pointer to command details structure or NULL * * Use the firmware to read the Rx control register, * especially useful if the Rx unit is under heavy pressure **/ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, u32 reg_addr, u32 *reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; int status; if (!reg_val) return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); cmd_resp->address = cpu_to_le32(reg_addr); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (status == 0) *reg_val = le32_to_cpu(cmd_resp->value); return status; } /** * i40e_read_rx_ctl - read from an Rx control register * @hw: pointer to the hw struct * @reg_addr: register address **/ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) { bool use_register; int status = 0; int retry = 5; u32 val = 0; use_register = (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5)) || (hw->mac.type == I40E_MAC_X722)); if (!use_register) { do_retry: status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { usleep_range(1000, 2000); retry--; goto do_retry; } } /* if the AQ access failed, try the old-fashioned way */ if (status || use_register) val = rd32(hw, reg_addr); return val; } /** * i40e_aq_rx_ctl_write_register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: register value * @cmd_details: pointer to command details structure or 
NULL * * Use the firmware to write to an Rx control register, * especially useful if the Rx unit is under heavy pressure **/ int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, u32 reg_addr, u32 reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_rx_ctl_reg_read_write *cmd = (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); cmd->address = cpu_to_le32(reg_addr); cmd->value = cpu_to_le32(reg_val); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_write_rx_ctl - write to an Rx control register * @hw: pointer to the hw struct * @reg_addr: register address * @reg_val: register value **/ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) { bool use_register; int status = 0; int retry = 5; use_register = (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5)) || (hw->mac.type == I40E_MAC_X722)); if (!use_register) { do_retry: status = i40e_aq_rx_ctl_write_register(hw, reg_addr, reg_val, NULL); if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { usleep_range(1000, 2000); retry--; goto do_retry; } } /* if the AQ access failed, try the old-fashioned way */ if (status || use_register) wr32(hw, reg_addr, reg_val); } /** * i40e_mdio_if_number_selection - MDIO I/F number selection * @hw: pointer to the hw struct * @set_mdio: use MDIO I/F number specified by mdio_num * @mdio_num: MDIO I/F number * @cmd: pointer to PHY Register command structure **/ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio, u8 mdio_num, struct i40e_aqc_phy_register_access *cmd) { if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) { if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED) cmd->cmd_flags |= I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER | ((mdio_num << I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) & I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK); else i40e_debug(hw, I40E_DEBUG_PHY, "MDIO I/F number selection not supported by current FW version.\n"); } } /** * i40e_aq_set_phy_register_ext * @hw: pointer to the hw struct * @phy_select: select which phy should be accessed * @dev_addr: PHY device address * @page_change: flag to indicate if phy page should be updated * @set_mdio: use MDIO I/F number specified by mdio_num * @mdio_num: MDIO I/F number * @reg_addr: PHY register address * @reg_val: new register value * @cmd_details: pointer to command details structure or NULL * * Write the external PHY register. * NOTE: In common cases MDIO I/F number should not be changed, thats why you * may use simple wrapper i40e_aq_set_phy_register. 
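 *
 * Editor's sketch of the common case through the wrapper, mirroring the LED
 * helpers earlier in this file (illustrative only; reg_val is assumed to be
 * the u32 value the caller wants written):
 *	status = i40e_aq_set_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *					  I40E_PHY_COM_REG_PAGE, true,
 *					  I40E_PHY_LED_PROV_REG_1, reg_val,
 *					  NULL);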
**/ int i40e_aq_set_phy_register_ext(struct i40e_hw *hw, u8 phy_select, u8 dev_addr, bool page_change, bool set_mdio, u8 mdio_num, u32 reg_addr, u32 reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_phy_register_access *cmd = (struct i40e_aqc_phy_register_access *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_register); cmd->phy_interface = phy_select; cmd->dev_address = dev_addr; cmd->reg_address = cpu_to_le32(reg_addr); cmd->reg_value = cpu_to_le32(reg_val); i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd); if (!page_change) cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; } /** * i40e_aq_get_phy_register_ext * @hw: pointer to the hw struct * @phy_select: select which phy should be accessed * @dev_addr: PHY device address * @page_change: flag to indicate if phy page should be updated * @set_mdio: use MDIO I/F number specified by mdio_num * @mdio_num: MDIO I/F number * @reg_addr: PHY register address * @reg_val: read register value * @cmd_details: pointer to command details structure or NULL * * Read the external PHY register. * NOTE: In common cases MDIO I/F number should not be changed, thats why you * may use simple wrapper i40e_aq_get_phy_register. **/ int i40e_aq_get_phy_register_ext(struct i40e_hw *hw, u8 phy_select, u8 dev_addr, bool page_change, bool set_mdio, u8 mdio_num, u32 reg_addr, u32 *reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_phy_register_access *cmd = (struct i40e_aqc_phy_register_access *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_phy_register); cmd->phy_interface = phy_select; cmd->dev_address = dev_addr; cmd->reg_address = cpu_to_le32(reg_addr); i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd); if (!page_change) cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE; status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) *reg_val = le32_to_cpu(cmd->reg_value); return status; } /** * i40e_aq_write_ddp - Write dynamic device personalization (ddp) * @hw: pointer to the hw struct * @buff: command buffer (size in bytes = buff_size) * @buff_size: buffer size in bytes * @track_id: package tracking id * @error_offset: returns error offset * @error_info: returns error information * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, u16 buff_size, u32 track_id, u32 *error_offset, u32 *error_info, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_write_personalization_profile *cmd = (struct i40e_aqc_write_personalization_profile *) &desc.params.raw; struct i40e_aqc_write_ddp_resp *resp; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_write_personalization_profile); desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); desc.datalen = cpu_to_le16(buff_size); cmd->profile_track_id = cpu_to_le32(track_id); status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; if (error_offset) *error_offset = le32_to_cpu(resp->error_offset); if (error_info) *error_info = le32_to_cpu(resp->error_info); } return status; } /** * i40e_aq_get_ddp_list - Read dynamic device 
personalization (ddp) * @hw: pointer to the hw struct * @buff: command buffer (size in bytes = buff_size) * @buff_size: buffer size in bytes * @flags: AdminQ command flags * @cmd_details: pointer to command details structure or NULL **/ int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, u16 buff_size, u8 flags, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_applied_profiles *cmd = (struct i40e_aqc_get_applied_profiles *)&desc.params.raw; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_personalization_profile_list); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); if (buff_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); desc.datalen = cpu_to_le16(buff_size); cmd->flags = flags; status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); return status; } /** * i40e_find_segment_in_package * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E) * @pkg_hdr: pointer to the package header to be searched * * This function searches a package file for a particular segment type. On * success it returns a pointer to the segment header, otherwise it will * return NULL. **/ struct i40e_generic_seg_header * i40e_find_segment_in_package(u32 segment_type, struct i40e_package_header *pkg_hdr) { struct i40e_generic_seg_header *segment; u32 i; /* Search all package segments for the requested segment type */ for (i = 0; i < pkg_hdr->segment_count; i++) { segment = (struct i40e_generic_seg_header *)((u8 *)pkg_hdr + pkg_hdr->segment_offset[i]); if (segment->type == segment_type) return segment; } return NULL; } /* Get section table in profile */ #define I40E_SECTION_TABLE(profile, sec_tbl) \ do { \ struct i40e_profile_segment *p = (profile); \ u32 count; \ u32 *nvm; \ count = p->device_table_count; \ nvm = (u32 *)&p->device_table[count]; \ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \ } while (0) /* Get section header in profile */ #define I40E_SECTION_HEADER(profile, offset) \ (struct i40e_profile_section_header *)((u8 *)(profile) + (offset)) /** * i40e_find_section_in_profile * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE) * @profile: pointer to the i40e segment header to be searched * * This function searches i40e segment for a particular section type. On * success it returns a pointer to the section header, otherwise it will * return NULL. 
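 *
 * Editor's sketch of a lookup (illustrative only; profile is assumed to be a
 * pointer to an already validated i40e profile segment):
 *	struct i40e_profile_section_header *sec;
 *
 *	sec = i40e_find_section_in_profile(SECTION_TYPE_NOTE, profile);
 *	if (!sec)
 *		return -ENOENT;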
**/ struct i40e_profile_section_header * i40e_find_section_in_profile(u32 section_type, struct i40e_profile_segment *profile) { struct i40e_profile_section_header *sec; struct i40e_section_table *sec_tbl; u32 sec_off; u32 i; if (profile->header.type != SEGMENT_TYPE_I40E) return NULL; I40E_SECTION_TABLE(profile, sec_tbl); for (i = 0; i < sec_tbl->section_count; i++) { sec_off = sec_tbl->section_offset[i]; sec = I40E_SECTION_HEADER(profile, sec_off); if (sec->section.type == section_type) return sec; } return NULL; } /** * i40e_ddp_exec_aq_section - Execute generic AQ for DDP * @hw: pointer to the hw struct * @aq: command buffer containing all data to execute AQ **/ static int i40e_ddp_exec_aq_section(struct i40e_hw *hw, struct i40e_profile_aq_section *aq) { struct i40e_aq_desc desc; u8 *msg = NULL; u16 msglen; int status; i40e_fill_default_direct_cmd_desc(&desc, aq->opcode); desc.flags |= cpu_to_le16(aq->flags); memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw)); msglen = aq->datalen; if (msglen) { desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (msglen > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); desc.datalen = cpu_to_le16(msglen); msg = &aq->data[0]; } status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL); if (status) { i40e_debug(hw, I40E_DEBUG_PACKAGE, "unable to exec DDP AQ opcode %u, error %d\n", aq->opcode, status); return status; } /* copy returned desc to aq_buf */ memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw)); return 0; } /** * i40e_validate_profile * @hw: pointer to the hardware structure * @profile: pointer to the profile segment of the package to be validated * @track_id: package tracking id * @rollback: flag if the profile is for rollback. * * Validates supported devices and profile's sections. */ static int i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, u32 track_id, bool rollback) { struct i40e_profile_section_header *sec = NULL; struct i40e_section_table *sec_tbl; u32 vendor_dev_id; int status = 0; u32 dev_cnt; u32 sec_off; u32 i; if (track_id == I40E_DDP_TRACKID_INVALID) { i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n"); return -EOPNOTSUPP; } dev_cnt = profile->device_table_count; for (i = 0; i < dev_cnt; i++) { vendor_dev_id = profile->device_table[i].vendor_dev_id; if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL && hw->device_id == (vendor_dev_id & 0xFFFF)) break; } if (dev_cnt && i == dev_cnt) { i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP\n"); return -ENODEV; } I40E_SECTION_TABLE(profile, sec_tbl); /* Validate sections types */ for (i = 0; i < sec_tbl->section_count; i++) { sec_off = sec_tbl->section_offset[i]; sec = I40E_SECTION_HEADER(profile, sec_off); if (rollback) { if (sec->section.type == SECTION_TYPE_MMIO || sec->section.type == SECTION_TYPE_AQ || sec->section.type == SECTION_TYPE_RB_AQ) { i40e_debug(hw, I40E_DEBUG_PACKAGE, "Not a roll-back package\n"); return -EOPNOTSUPP; } } else { if (sec->section.type == SECTION_TYPE_RB_AQ || sec->section.type == SECTION_TYPE_RB_MMIO) { i40e_debug(hw, I40E_DEBUG_PACKAGE, "Not an original package\n"); return -EOPNOTSUPP; } } } return status; } /** * i40e_write_profile * @hw: pointer to the hardware structure * @profile: pointer to the profile segment of the package to be downloaded * @track_id: package tracking id * * Handles the download of a complete package. 
*/ int i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, u32 track_id) { struct i40e_profile_section_header *sec = NULL; struct i40e_profile_aq_section *ddp_aq; struct i40e_section_table *sec_tbl; u32 offset = 0, info = 0; u32 section_size = 0; int status = 0; u32 sec_off; u32 i; status = i40e_validate_profile(hw, profile, track_id, false); if (status) return status; I40E_SECTION_TABLE(profile, sec_tbl); for (i = 0; i < sec_tbl->section_count; i++) { sec_off = sec_tbl->section_offset[i]; sec = I40E_SECTION_HEADER(profile, sec_off); /* Process generic admin command */ if (sec->section.type == SECTION_TYPE_AQ) { ddp_aq = (struct i40e_profile_aq_section *)&sec[1]; status = i40e_ddp_exec_aq_section(hw, ddp_aq); if (status) { i40e_debug(hw, I40E_DEBUG_PACKAGE, "Failed to execute aq: section %d, opcode %u\n", i, ddp_aq->opcode); break; } sec->section.type = SECTION_TYPE_RB_AQ; } /* Skip any non-mmio sections */ if (sec->section.type != SECTION_TYPE_MMIO) continue; section_size = sec->section.size + sizeof(struct i40e_profile_section_header); /* Write MMIO section */ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, track_id, &offset, &info, NULL); if (status) { i40e_debug(hw, I40E_DEBUG_PACKAGE, "Failed to write profile: section %d, offset %d, info %d\n", i, offset, info); break; } } return status; } /** * i40e_rollback_profile * @hw: pointer to the hardware structure * @profile: pointer to the profile segment of the package to be removed * @track_id: package tracking id * * Rolls back previously loaded package. */ int i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, u32 track_id) { struct i40e_profile_section_header *sec = NULL; struct i40e_section_table *sec_tbl; u32 offset = 0, info = 0; u32 section_size = 0; int status = 0; u32 sec_off; int i; status = i40e_validate_profile(hw, profile, track_id, true); if (status) return status; I40E_SECTION_TABLE(profile, sec_tbl); /* For rollback write sections in reverse */ for (i = sec_tbl->section_count - 1; i >= 0; i--) { sec_off = sec_tbl->section_offset[i]; sec = I40E_SECTION_HEADER(profile, sec_off); /* Skip any non-rollback sections */ if (sec->section.type != SECTION_TYPE_RB_MMIO) continue; section_size = sec->section.size + sizeof(struct i40e_profile_section_header); /* Write roll-back MMIO section */ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, track_id, &offset, &info, NULL); if (status) { i40e_debug(hw, I40E_DEBUG_PACKAGE, "Failed to write profile: section %d, offset %d, info %d\n", i, offset, info); break; } } return status; } /** * i40e_add_pinfo_to_list * @hw: pointer to the hardware structure * @profile: pointer to the profile segment of the package * @profile_info_sec: buffer for information section * @track_id: package tracking id * * Register a profile to the list of loaded profiles. 
*/ int i40e_add_pinfo_to_list(struct i40e_hw *hw, struct i40e_profile_segment *profile, u8 *profile_info_sec, u32 track_id) { struct i40e_profile_section_header *sec = NULL; struct i40e_profile_info *pinfo; u32 offset = 0, info = 0; int status = 0; sec = (struct i40e_profile_section_header *)profile_info_sec; sec->tbl_size = 1; sec->data_end = sizeof(struct i40e_profile_section_header) + sizeof(struct i40e_profile_info); sec->section.type = SECTION_TYPE_INFO; sec->section.offset = sizeof(struct i40e_profile_section_header); sec->section.size = sizeof(struct i40e_profile_info); pinfo = (struct i40e_profile_info *)(profile_info_sec + sec->section.offset); pinfo->track_id = track_id; pinfo->version = profile->version; pinfo->op = I40E_DDP_ADD_TRACKID; memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, track_id, &offset, &info, NULL); return status; } /** * i40e_aq_add_cloud_filters * @hw: pointer to the hardware structure * @seid: VSI seid to add cloud filters from * @filters: Buffer which contains the filters to be added * @filter_count: number of filters contained in the buffer * * Set the cloud filters for a given VSI. The contents of the * i40e_aqc_cloud_filters_element_data are filled in by the caller * of the function. * **/ int i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_data *filters, u8 filter_count) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; u16 buff_len; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_cloud_filters); buff_len = filter_count * sizeof(*filters); desc.datalen = cpu_to_le16(buff_len); desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); cmd->num_filters = filter_count; cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); return status; } /** * i40e_aq_add_cloud_filters_bb * @hw: pointer to the hardware structure * @seid: VSI seid to add cloud filters from * @filters: Buffer which contains the filters in big buffer to be added * @filter_count: number of filters contained in the buffer * * Set the big buffer cloud filters for a given VSI. The contents of the * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the * function. * **/ int i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_bb *filters, u8 filter_count) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; u16 buff_len; int status; int i; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_cloud_filters); buff_len = filter_count * sizeof(*filters); desc.datalen = cpu_to_le16(buff_len); desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); cmd->num_filters = filter_count; cmd->seid = cpu_to_le16(seid); cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; for (i = 0; i < filter_count; i++) { u16 tnl_type; u32 ti; tnl_type = (le16_to_cpu(filters[i].element.flags) & I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; /* Due to hardware eccentricities, the VNI for Geneve is shifted * one more byte further than normally used for Tenant ID in * other tunnel types. 
*/ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { ti = le32_to_cpu(filters[i].element.tenant_id); filters[i].element.tenant_id = cpu_to_le32(ti << 8); } } status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); return status; } /** * i40e_aq_rem_cloud_filters * @hw: pointer to the hardware structure * @seid: VSI seid to remove cloud filters from * @filters: Buffer which contains the filters to be removed * @filter_count: number of filters contained in the buffer * * Remove the cloud filters for a given VSI. The contents of the * i40e_aqc_cloud_filters_element_data are filled in by the caller * of the function. * **/ int i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_data *filters, u8 filter_count) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; u16 buff_len; int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_cloud_filters); buff_len = filter_count * sizeof(*filters); desc.datalen = cpu_to_le16(buff_len); desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); cmd->num_filters = filter_count; cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); return status; } /** * i40e_aq_rem_cloud_filters_bb * @hw: pointer to the hardware structure * @seid: VSI seid to remove cloud filters from * @filters: Buffer which contains the filters in big buffer to be removed * @filter_count: number of filters contained in the buffer * * Remove the big buffer cloud filters for a given VSI. The contents of the * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the * function. * **/ int i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_bb *filters, u8 filter_count) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; u16 buff_len; int status; int i; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_cloud_filters); buff_len = filter_count * sizeof(*filters); desc.datalen = cpu_to_le16(buff_len); desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); cmd->num_filters = filter_count; cmd->seid = cpu_to_le16(seid); cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; for (i = 0; i < filter_count; i++) { u16 tnl_type; u32 ti; tnl_type = (le16_to_cpu(filters[i].element.flags) & I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; /* Due to hardware eccentricities, the VNI for Geneve is shifted * one more byte further than normally used for Tenant ID in * other tunnel types. */ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { ti = le32_to_cpu(filters[i].element.tenant_id); filters[i].element.tenant_id = cpu_to_le32(ti << 8); } } status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); return status; }
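
/* Editor's addition, illustrative only (not part of the upstream file):
 * i40e_read_rx_ctl() and i40e_write_rx_ctl() above already hide the choice
 * between the AdminQ accessors and a direct register access, so a
 * read-modify-write of an Rx control register reduces to a helper like the
 * sketch below.  The helper name and the mask/bit parameters are
 * hypothetical.
 */
static inline void i40e_rx_ctl_rmw_sketch(struct i40e_hw *hw, u32 reg_addr,
					  u32 clear_mask, u32 set_bits)
{
	u32 val = i40e_read_rx_ctl(hw, reg_addr);

	val &= ~clear_mask;
	val |= set_bits;
	i40e_write_rx_ctl(hw, reg_addr, val);
}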
linux-master
drivers/net/ethernet/intel/i40e/i40e_common.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2021 Intel Corporation. */ #ifdef CONFIG_I40E_DCB #include "i40e.h" #include <net/dcbnl.h> #define I40E_DCBNL_STATUS_SUCCESS 0 #define I40E_DCBNL_STATUS_ERROR 1 static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, struct i40e_dcb_app_priority_table *app); /** * i40e_get_pfc_delay - retrieve PFC Link Delay * @hw: pointer to hardware struct * @delay: holds the PFC Link delay value * * Returns PFC Link Delay from the PRTDCB_GENC.PFCLDA **/ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay) { u32 val; val = rd32(hw, I40E_PRTDCB_GENC); *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >> I40E_PRTDCB_GENC_PFCLDA_SHIFT); } /** * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration * @dev: the corresponding netdev * @ets: structure to hold the ETS information * * Returns local IEEE ETS configuration **/ static int i40e_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) { struct i40e_pf *pf = i40e_netdev_to_pf(dev); struct i40e_dcbx_config *dcbxcfg; if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; dcbxcfg = &pf->hw.local_dcbx_config; ets->willing = dcbxcfg->etscfg.willing; ets->ets_cap = I40E_MAX_TRAFFIC_CLASS; ets->cbs = dcbxcfg->etscfg.cbs; memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable, sizeof(ets->tc_tx_bw)); memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable, sizeof(ets->tc_rx_bw)); memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable, sizeof(ets->tc_tsa)); memcpy(ets->prio_tc, dcbxcfg->etscfg.prioritytable, sizeof(ets->prio_tc)); memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable, sizeof(ets->tc_reco_bw)); memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable, sizeof(ets->tc_reco_tsa)); memcpy(ets->reco_prio_tc, dcbxcfg->etscfg.prioritytable, sizeof(ets->reco_prio_tc)); return 0; } /** * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration * @dev: the corresponding netdev * @pfc: structure to hold the PFC information * * Returns local IEEE PFC configuration **/ static int i40e_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct i40e_pf *pf = i40e_netdev_to_pf(dev); struct i40e_dcbx_config *dcbxcfg; struct i40e_hw *hw = &pf->hw; int i; if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; dcbxcfg = &hw->local_dcbx_config; pfc->pfc_cap = dcbxcfg->pfc.pfccap; pfc->pfc_en = dcbxcfg->pfc.pfcenable; pfc->mbc = dcbxcfg->pfc.mbc; i40e_get_pfc_delay(hw, &pfc->delay); /* Get Requests/Indications */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { pfc->requests[i] = pf->stats.priority_xoff_tx[i]; pfc->indications[i] = pf->stats.priority_xoff_rx[i]; } return 0; } /** * i40e_dcbnl_ieee_setets - set IEEE ETS configuration * @netdev: the corresponding netdev * @ets: structure to hold the ETS information * * Set IEEE ETS configuration **/ static int i40e_dcbnl_ieee_setets(struct net_device *netdev, struct ieee_ets *ets) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); struct i40e_dcbx_config *old_cfg; int i, ret; if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)) return -EINVAL; old_cfg = &pf->hw.local_dcbx_config; /* Copy current config into temp */ pf->tmp_cfg = *old_cfg; /* Update the ETS configuration for temp */ pf->tmp_cfg.etscfg.willing = ets->willing; pf->tmp_cfg.etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS; pf->tmp_cfg.etscfg.cbs = ets->cbs; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { pf->tmp_cfg.etscfg.tcbwtable[i] = ets->tc_tx_bw[i]; pf->tmp_cfg.etscfg.tsatable[i] = ets->tc_tsa[i]; pf->tmp_cfg.etscfg.prioritytable[i] = 
ets->prio_tc[i]; pf->tmp_cfg.etsrec.tcbwtable[i] = ets->tc_reco_bw[i]; pf->tmp_cfg.etsrec.tsatable[i] = ets->tc_reco_tsa[i]; pf->tmp_cfg.etsrec.prioritytable[i] = ets->reco_prio_tc[i]; } /* Commit changes to HW */ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg); if (ret) { dev_info(&pf->pdev->dev, "Failed setting DCB ETS configuration err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } return 0; } /** * i40e_dcbnl_ieee_setpfc - set local IEEE PFC configuration * @netdev: the corresponding netdev * @pfc: structure to hold the PFC information * * Sets local IEEE PFC configuration **/ static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev, struct ieee_pfc *pfc) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); struct i40e_dcbx_config *old_cfg; int ret; if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)) return -EINVAL; old_cfg = &pf->hw.local_dcbx_config; /* Copy current config into temp */ pf->tmp_cfg = *old_cfg; if (pfc->pfc_cap) pf->tmp_cfg.pfc.pfccap = pfc->pfc_cap; else pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; pf->tmp_cfg.pfc.pfcenable = pfc->pfc_en; ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg); if (ret) { dev_info(&pf->pdev->dev, "Failed setting DCB PFC configuration err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } return 0; } /** * i40e_dcbnl_ieee_setapp - set local IEEE App configuration * @netdev: the corresponding netdev * @app: structure to hold the Application information * * Sets local IEEE App configuration **/ static int i40e_dcbnl_ieee_setapp(struct net_device *netdev, struct dcb_app *app) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); struct i40e_dcb_app_priority_table new_app; struct i40e_dcbx_config *old_cfg; int ret; if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)) return -EINVAL; old_cfg = &pf->hw.local_dcbx_config; if (old_cfg->numapps == I40E_DCBX_MAX_APPS) return -EINVAL; ret = dcb_ieee_setapp(netdev, app); if (ret) return ret; new_app.selector = app->selector; new_app.protocolid = app->protocol; new_app.priority = app->priority; /* Already internally available */ if (i40e_dcbnl_find_app(old_cfg, &new_app)) return 0; /* Copy current config into temp */ pf->tmp_cfg = *old_cfg; /* Add the app */ pf->tmp_cfg.app[pf->tmp_cfg.numapps++] = new_app; ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg); if (ret) { dev_info(&pf->pdev->dev, "Failed setting DCB configuration err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } return 0; } /** * i40e_dcbnl_ieee_delapp - delete local IEEE App configuration * @netdev: the corresponding netdev * @app: structure to hold the Application information * * Deletes local IEEE App configuration other than the first application * required by firmware **/ static int i40e_dcbnl_ieee_delapp(struct net_device *netdev, struct dcb_app *app) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); struct i40e_dcbx_config *old_cfg; int i, j, ret; if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)) return -EINVAL; ret = dcb_ieee_delapp(netdev, app); if (ret) return ret; old_cfg = &pf->hw.local_dcbx_config; /* Need one app for FW so keep it */ if (old_cfg->numapps == 1) return 0; /* Copy current config into temp */ pf->tmp_cfg = *old_cfg; /* Find and reset the app */ for (i = 1; i < pf->tmp_cfg.numapps; i++) { if (app->selector == pf->tmp_cfg.app[i].selector && app->protocol == 
pf->tmp_cfg.app[i].protocolid && app->priority == pf->tmp_cfg.app[i].priority) { /* Reset the app data */ pf->tmp_cfg.app[i].selector = 0; pf->tmp_cfg.app[i].protocolid = 0; pf->tmp_cfg.app[i].priority = 0; break; } } /* If the specific DCB app not found */ if (i == pf->tmp_cfg.numapps) return -EINVAL; pf->tmp_cfg.numapps--; /* Overwrite the tmp_cfg app */ for (j = i; j < pf->tmp_cfg.numapps; j++) pf->tmp_cfg.app[j] = old_cfg->app[j + 1]; ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg); if (ret) { dev_info(&pf->pdev->dev, "Failed setting DCB configuration err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } return 0; } /** * i40e_dcbnl_getstate - Get DCB enabled state * @netdev: the corresponding netdev * * Get the current DCB enabled state **/ static u8 i40e_dcbnl_getstate(struct net_device *netdev) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); dev_dbg(&pf->pdev->dev, "DCB state=%d\n", !!(pf->flags & I40E_FLAG_DCB_ENABLED)); return !!(pf->flags & I40E_FLAG_DCB_ENABLED); } /** * i40e_dcbnl_setstate - Set DCB state * @netdev: the corresponding netdev * @state: enable or disable * * Set the DCB state **/ static u8 i40e_dcbnl_setstate(struct net_device *netdev, u8 state) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); int ret = I40E_DCBNL_STATUS_SUCCESS; if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) || (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)) return ret; dev_dbg(&pf->pdev->dev, "new state=%d current state=%d\n", state, (pf->flags & I40E_FLAG_DCB_ENABLED) ? 1 : 0); /* Nothing to do */ if (!state == !(pf->flags & I40E_FLAG_DCB_ENABLED)) return ret; if (i40e_is_sw_dcb(pf)) { if (state) { pf->flags |= I40E_FLAG_DCB_ENABLED; memcpy(&pf->hw.desired_dcbx_config, &pf->hw.local_dcbx_config, sizeof(struct i40e_dcbx_config)); } else { pf->flags &= ~I40E_FLAG_DCB_ENABLED; } } else { /* Cannot directly manipulate FW LLDP Agent */ ret = I40E_DCBNL_STATUS_ERROR; } return ret; } /** * i40e_dcbnl_set_pg_tc_cfg_tx - Set CEE PG Tx config * @netdev: the corresponding netdev * @tc: the corresponding traffic class * @prio_type: the traffic priority type * @bwg_id: the BW group id the traffic class belongs to * @bw_pct: the BW percentage for the corresponding BWG * @up_map: prio mapped to corresponding tc * * Set Tx PG settings for CEE mode **/ static void i40e_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 prio_type, u8 bwg_id, u8 bw_pct, u8 up_map) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); int i; if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) || (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)) return; /* LLTC not supported yet */ if (tc >= I40E_MAX_TRAFFIC_CLASS) return; /* prio_type, bwg_id and bw_pct per UP are not supported */ /* Use only up_map to map tc */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (up_map & BIT(i)) pf->tmp_cfg.etscfg.prioritytable[i] = tc; } pf->tmp_cfg.etscfg.tsatable[tc] = I40E_IEEE_TSA_ETS; dev_dbg(&pf->pdev->dev, "Set PG config tc=%d bwg_id=%d prio_type=%d bw_pct=%d up_map=%d\n", tc, bwg_id, prio_type, bw_pct, up_map); } /** * i40e_dcbnl_set_pg_bwg_cfg_tx - Set CEE PG Tx BW config * @netdev: the corresponding netdev * @pgid: the corresponding traffic class * @bw_pct: the BW percentage for the specified traffic class * * Set Tx BW settings for CEE mode **/ static void i40e_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) || (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)) return; /* LLTC not supported yet */ if 
(pgid >= I40E_MAX_TRAFFIC_CLASS) return; pf->tmp_cfg.etscfg.tcbwtable[pgid] = bw_pct; dev_dbg(&pf->pdev->dev, "Set PG BW config tc=%d bw_pct=%d\n", pgid, bw_pct); } /** * i40e_dcbnl_set_pg_tc_cfg_rx - Set CEE PG Rx config * @netdev: the corresponding netdev * @prio: the corresponding traffic class * @prio_type: the traffic priority type * @pgid: the BW group id the traffic class belongs to * @bw_pct: the BW percentage for the corresponding BWG * @up_map: prio mapped to corresponding tc * * Set Rx BW settings for CEE mode. The hardware does not support this * so we won't allow setting of this parameter. **/ static void i40e_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int __always_unused prio, u8 __always_unused prio_type, u8 __always_unused pgid, u8 __always_unused bw_pct, u8 __always_unused up_map) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); dev_dbg(&pf->pdev->dev, "Rx TC PG Config Not Supported.\n"); } /** * i40e_dcbnl_set_pg_bwg_cfg_rx - Set CEE PG Rx config * @netdev: the corresponding netdev * @pgid: the corresponding traffic class * @bw_pct: the BW percentage for the specified traffic class * * Set Rx BW settings for CEE mode. The hardware does not support this * so we won't allow setting of this parameter. **/ static void i40e_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int pgid, u8 bw_pct) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); dev_dbg(&pf->pdev->dev, "Rx BWG PG Config Not Supported.\n"); } /** * i40e_dcbnl_get_pg_tc_cfg_tx - Get CEE PG Tx config * @netdev: the corresponding netdev * @prio: the corresponding user priority * @prio_type: traffic priority type * @pgid: the BW group ID the traffic class belongs to * @bw_pct: BW percentage for the corresponding BWG * @up_map: prio mapped to corresponding TC * * Get Tx PG settings for CEE mode **/ static void i40e_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio, u8 __always_unused *prio_type, u8 *pgid, u8 __always_unused *bw_pct, u8 __always_unused *up_map) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) || (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)) return; if (prio >= I40E_MAX_USER_PRIORITY) return; *pgid = pf->hw.local_dcbx_config.etscfg.prioritytable[prio]; dev_dbg(&pf->pdev->dev, "Get PG config prio=%d tc=%d\n", prio, *pgid); } /** * i40e_dcbnl_get_pg_bwg_cfg_tx - Get CEE PG BW config * @netdev: the corresponding netdev * @pgid: the corresponding traffic class * @bw_pct: the BW percentage for the corresponding TC * * Get Tx BW settings for given TC in CEE mode **/ static void i40e_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) || (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)) return; if (pgid >= I40E_MAX_TRAFFIC_CLASS) return; *bw_pct = pf->hw.local_dcbx_config.etscfg.tcbwtable[pgid]; dev_dbg(&pf->pdev->dev, "Get PG BW config tc=%d bw_pct=%d\n", pgid, *bw_pct); } /** * i40e_dcbnl_get_pg_tc_cfg_rx - Get CEE PG Rx config * @netdev: the corresponding netdev * @prio: the corresponding user priority * @prio_type: the traffic priority type * @pgid: the PG ID * @bw_pct: the BW percentage for the corresponding BWG * @up_map: prio mapped to corresponding TC * * Get Rx PG settings for CEE mode. The UP2TC map is applied in same * manner for Tx and Rx (symmetrical) so return the TC information for * given priority accordingly. 
**/ static void i40e_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, u8 *prio_type, u8 *pgid, u8 *bw_pct, u8 *up_map) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) || (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)) return; if (prio >= I40E_MAX_USER_PRIORITY) return; *pgid = pf->hw.local_dcbx_config.etscfg.prioritytable[prio]; } /** * i40e_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config * @netdev: the corresponding netdev * @pgid: the corresponding traffic class * @bw_pct: the BW percentage for the corresponding TC * * Get Rx BW settings for given TC in CEE mode * The adapter doesn't support Rx ETS and runs in strict priority * mode in Rx path and hence just return 0. **/ static void i40e_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int pgid, u8 *bw_pct) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) || (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)) return; *bw_pct = 0; } /** * i40e_dcbnl_set_pfc_cfg - Set CEE PFC configuration * @netdev: the corresponding netdev * @prio: the corresponding user priority * @setting: the PFC setting for given priority * * Set the PFC enabled/disabled setting for given user priority **/ static void i40e_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 setting) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) || (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)) return; if (prio >= I40E_MAX_USER_PRIORITY) return; pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; if (setting) pf->tmp_cfg.pfc.pfcenable |= BIT(prio); else pf->tmp_cfg.pfc.pfcenable &= ~BIT(prio); dev_dbg(&pf->pdev->dev, "Set PFC Config up=%d setting=%d pfcenable=0x%x\n", prio, setting, pf->tmp_cfg.pfc.pfcenable); } /** * i40e_dcbnl_get_pfc_cfg - Get CEE PFC configuration * @netdev: the corresponding netdev * @prio: the corresponding user priority * @setting: the PFC setting for given priority * * Get the PFC enabled/disabled setting for given user priority **/ static void i40e_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) || (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)) return; if (prio >= I40E_MAX_USER_PRIORITY) return; *setting = (pf->hw.local_dcbx_config.pfc.pfcenable >> prio) & 0x1; dev_dbg(&pf->pdev->dev, "Get PFC Config up=%d setting=%d pfcenable=0x%x\n", prio, *setting, pf->hw.local_dcbx_config.pfc.pfcenable); } /** * i40e_dcbnl_cee_set_all - Commit CEE DCB settings to hardware * @netdev: the corresponding netdev * * Commit the current DCB configuration to hardware **/ static u8 i40e_dcbnl_cee_set_all(struct net_device *netdev) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); int err; if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) || (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)) return I40E_DCBNL_STATUS_ERROR; dev_dbg(&pf->pdev->dev, "Commit DCB Configuration to the hardware\n"); err = i40e_hw_dcb_config(pf, &pf->tmp_cfg); return err ? 
I40E_DCBNL_STATUS_ERROR : I40E_DCBNL_STATUS_SUCCESS; } /** * i40e_dcbnl_get_cap - Get DCBX capabilities of adapter * @netdev: the corresponding netdev * @capid: the capability type * @cap: the capability value * * Return the capability value for a given capability type **/ static u8 i40e_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) return I40E_DCBNL_STATUS_ERROR; switch (capid) { case DCB_CAP_ATTR_PG: case DCB_CAP_ATTR_PFC: *cap = true; break; case DCB_CAP_ATTR_PG_TCS: case DCB_CAP_ATTR_PFC_TCS: *cap = 0x80; break; case DCB_CAP_ATTR_DCBX: *cap = pf->dcbx_cap; break; case DCB_CAP_ATTR_UP2TC: case DCB_CAP_ATTR_GSP: case DCB_CAP_ATTR_BCN: default: *cap = false; break; } dev_dbg(&pf->pdev->dev, "Get Capability cap=%d capval=0x%x\n", capid, *cap); return I40E_DCBNL_STATUS_SUCCESS; } /** * i40e_dcbnl_getnumtcs - Get max number of traffic classes supported * @netdev: the corresponding netdev * @tcid: the TC id * @num: total number of TCs supported by the device * * Return the total number of TCs supported by the adapter **/ static int i40e_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) return -EINVAL; *num = I40E_MAX_TRAFFIC_CLASS; return 0; } /** * i40e_dcbnl_setnumtcs - Set CEE number of traffic classes * @netdev: the corresponding netdev * @tcid: the TC id * @num: total number of TCs * * Set the total number of TCs (Unsupported) **/ static int i40e_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) { return -EINVAL; } /** * i40e_dcbnl_getpfcstate - Get CEE PFC mode * @netdev: the corresponding netdev * * Get the current PFC enabled state **/ static u8 i40e_dcbnl_getpfcstate(struct net_device *netdev) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); /* Return enabled if any PFC enabled UP */ if (pf->hw.local_dcbx_config.pfc.pfcenable) return 1; else return 0; } /** * i40e_dcbnl_setpfcstate - Set CEE PFC mode * @netdev: the corresponding netdev * @state: required state * * The PFC state to be set; this is enabled/disabled based on the PFC * priority settings and not via this call for i40e driver **/ static void i40e_dcbnl_setpfcstate(struct net_device *netdev, u8 state) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); dev_dbg(&pf->pdev->dev, "PFC State is modified via PFC config.\n"); } /** * i40e_dcbnl_getapp - Get CEE APP * @netdev: the corresponding netdev * @idtype: the App selector * @id: the App ethtype or port number * * Return the CEE mode app for the given idtype and id **/ static int i40e_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); struct dcb_app app = { .selector = idtype, .protocol = id, }; if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) || (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)) return -EINVAL; return dcb_getapp(netdev, &app); } /** * i40e_dcbnl_setdcbx - set required DCBx capability * @netdev: the corresponding netdev * @mode: new DCB mode managed or CEE+IEEE * * Set DCBx capability features **/ static u8 i40e_dcbnl_setdcbx(struct net_device *netdev, u8 mode) { struct i40e_pf *pf = i40e_netdev_to_pf(netdev); /* Do not allow to set mode if managed by Firmware */ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) return I40E_DCBNL_STATUS_ERROR; /* No support for LLD_MANAGED modes or CEE+IEEE */ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || 
!(mode & DCB_CAP_DCBX_HOST)) return I40E_DCBNL_STATUS_ERROR; /* Already set to the given mode no change */ if (mode == pf->dcbx_cap) return I40E_DCBNL_STATUS_SUCCESS; pf->dcbx_cap = mode; if (mode & DCB_CAP_DCBX_VER_CEE) pf->hw.local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; else pf->hw.local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; dev_dbg(&pf->pdev->dev, "mode=%d\n", mode); return I40E_DCBNL_STATUS_SUCCESS; } /** * i40e_dcbnl_getdcbx - retrieve current DCBx capability * @dev: the corresponding netdev * * Returns DCBx capability features **/ static u8 i40e_dcbnl_getdcbx(struct net_device *dev) { struct i40e_pf *pf = i40e_netdev_to_pf(dev); return pf->dcbx_cap; } /** * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx * @dev: the corresponding netdev * @perm_addr: buffer to store the MAC address * * Returns the SAN MAC address used for LLDP exchange **/ static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev, u8 *perm_addr) { struct i40e_pf *pf = i40e_netdev_to_pf(dev); int i, j; memset(perm_addr, 0xff, MAX_ADDR_LEN); for (i = 0; i < dev->addr_len; i++) perm_addr[i] = pf->hw.mac.perm_addr[i]; for (j = 0; j < dev->addr_len; j++, i++) perm_addr[i] = pf->hw.mac.san_addr[j]; } static const struct dcbnl_rtnl_ops dcbnl_ops = { .ieee_getets = i40e_dcbnl_ieee_getets, .ieee_getpfc = i40e_dcbnl_ieee_getpfc, .getdcbx = i40e_dcbnl_getdcbx, .getpermhwaddr = i40e_dcbnl_get_perm_hw_addr, .ieee_setets = i40e_dcbnl_ieee_setets, .ieee_setpfc = i40e_dcbnl_ieee_setpfc, .ieee_setapp = i40e_dcbnl_ieee_setapp, .ieee_delapp = i40e_dcbnl_ieee_delapp, .getstate = i40e_dcbnl_getstate, .setstate = i40e_dcbnl_setstate, .setpgtccfgtx = i40e_dcbnl_set_pg_tc_cfg_tx, .setpgbwgcfgtx = i40e_dcbnl_set_pg_bwg_cfg_tx, .setpgtccfgrx = i40e_dcbnl_set_pg_tc_cfg_rx, .setpgbwgcfgrx = i40e_dcbnl_set_pg_bwg_cfg_rx, .getpgtccfgtx = i40e_dcbnl_get_pg_tc_cfg_tx, .getpgbwgcfgtx = i40e_dcbnl_get_pg_bwg_cfg_tx, .getpgtccfgrx = i40e_dcbnl_get_pg_tc_cfg_rx, .getpgbwgcfgrx = i40e_dcbnl_get_pg_bwg_cfg_rx, .setpfccfg = i40e_dcbnl_set_pfc_cfg, .getpfccfg = i40e_dcbnl_get_pfc_cfg, .setall = i40e_dcbnl_cee_set_all, .getcap = i40e_dcbnl_get_cap, .getnumtcs = i40e_dcbnl_getnumtcs, .setnumtcs = i40e_dcbnl_setnumtcs, .getpfcstate = i40e_dcbnl_getpfcstate, .setpfcstate = i40e_dcbnl_setpfcstate, .getapp = i40e_dcbnl_getapp, .setdcbx = i40e_dcbnl_setdcbx, }; /** * i40e_dcbnl_set_all - set all the apps and ieee data from DCBx config * @vsi: the corresponding vsi * * Set up all the IEEE APPs in the DCBNL App Table and generate event for * other settings **/ void i40e_dcbnl_set_all(struct i40e_vsi *vsi) { struct net_device *dev = vsi->netdev; struct i40e_pf *pf = i40e_netdev_to_pf(dev); struct i40e_dcbx_config *dcbxcfg; struct i40e_hw *hw = &pf->hw; struct dcb_app sapp; u8 prio, tc_map; int i; /* SW DCB taken care by DCBNL set calls */ if (pf->dcbx_cap & DCB_CAP_DCBX_HOST) return; /* DCB not enabled */ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) return; /* MFP mode but not an iSCSI PF so return */ if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(hw->func_caps.iscsi)) return; dcbxcfg = &hw->local_dcbx_config; /* Set up all the App TLVs if DCBx is negotiated */ for (i = 0; i < dcbxcfg->numapps; i++) { prio = dcbxcfg->app[i].priority; tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]); /* Add APP only if the TC is enabled for this VSI */ if (tc_map & vsi->tc_config.enabled_tc) { sapp.selector = dcbxcfg->app[i].selector; sapp.protocol = dcbxcfg->app[i].protocolid; sapp.priority = prio; dcb_ieee_setapp(dev, &sapp); } } /* Notify user-space of the 
changes */ dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0); } /** * i40e_dcbnl_vsi_del_app - Delete APP for given VSI * @vsi: the corresponding vsi * @app: APP to delete * * Delete given APP from the DCBNL APP table for given * VSI **/ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi, struct i40e_dcb_app_priority_table *app) { struct net_device *dev = vsi->netdev; struct dcb_app sapp; if (!dev) return -EINVAL; sapp.selector = app->selector; sapp.protocol = app->protocolid; sapp.priority = app->priority; return dcb_ieee_delapp(dev, &sapp); } /** * i40e_dcbnl_del_app - Delete APP on all VSIs * @pf: the corresponding PF * @app: APP to delete * * Delete given APP from all the VSIs for given PF **/ static void i40e_dcbnl_del_app(struct i40e_pf *pf, struct i40e_dcb_app_priority_table *app) { int v, err; for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && pf->vsi[v]->netdev) { err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app); dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n", pf->vsi[v]->seid, err, app->selector, app->protocolid, app->priority); } } } /** * i40e_dcbnl_find_app - Search APP in given DCB config * @cfg: DCBX configuration data * @app: APP to search for * * Find given APP in the DCB configuration **/ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, struct i40e_dcb_app_priority_table *app) { int i; for (i = 0; i < cfg->numapps; i++) { if (app->selector == cfg->app[i].selector && app->protocolid == cfg->app[i].protocolid && app->priority == cfg->app[i].priority) return true; } return false; } /** * i40e_dcbnl_flush_apps - Delete all removed APPs * @pf: the corresponding PF * @old_cfg: old DCBX configuration data * @new_cfg: new DCBX configuration data * * Find and delete all APPs that are not present in the passed * DCB configuration **/ void i40e_dcbnl_flush_apps(struct i40e_pf *pf, struct i40e_dcbx_config *old_cfg, struct i40e_dcbx_config *new_cfg) { struct i40e_dcb_app_priority_table app; int i; /* MFP mode but not an iSCSI PF so return */ if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi)) return; for (i = 0; i < old_cfg->numapps; i++) { app = old_cfg->app[i]; /* The APP is not available anymore delete it */ if (!i40e_dcbnl_find_app(new_cfg, &app)) i40e_dcbnl_del_app(pf, &app); } } /** * i40e_dcbnl_setup - DCBNL setup * @vsi: the corresponding vsi * * Set up DCBNL ops and initial APP TLVs **/ void i40e_dcbnl_setup(struct i40e_vsi *vsi) { struct net_device *dev = vsi->netdev; struct i40e_pf *pf = i40e_netdev_to_pf(dev); /* Not DCB capable */ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) return; dev->dcbnl_ops = &dcbnl_ops; /* Set initial IEEE DCB settings */ i40e_dcbnl_set_all(vsi); } #endif /* CONFIG_I40E_DCB */
linux-master
drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
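The i40e_dcbnl_setdcbx handler in the file above rejects any requested DCBx mode in which the firmware-managed (LLD_MANAGED) bit is set, in which CEE and IEEE are requested together, or in which the HOST bit is missing. The stand-alone sketch below restates only that acceptance rule outside the driver; the DCBX_* macros and the function name are illustrative stand-ins for the kernel's DCB_CAP_DCBX_* flags, not the real definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's DCB_CAP_DCBX_* capability bits;
 * the values are arbitrary and chosen only for this sketch.
 */
#define DCBX_HOST        0x01u  /* host (driver) negotiates DCBX */
#define DCBX_LLD_MANAGED 0x02u  /* firmware/LLD negotiates DCBX */
#define DCBX_VER_CEE     0x04u  /* CEE flavour of DCBX */
#define DCBX_VER_IEEE    0x08u  /* IEEE flavour of DCBX */

/* Mirror of the setdcbx acceptance rule: reject firmware-managed requests,
 * reject CEE and IEEE enabled at the same time, require host ownership.
 */
static bool dcbx_mode_is_acceptable(uint8_t mode)
{
	if (mode & DCBX_LLD_MANAGED)
		return false;
	if ((mode & DCBX_VER_CEE) && (mode & DCBX_VER_IEEE))
		return false;
	return (mode & DCBX_HOST) != 0;
}

int main(void)
{
	printf("%d\n", dcbx_mode_is_acceptable(DCBX_HOST | DCBX_VER_IEEE));  /* 1 */
	printf("%d\n", dcbx_mode_is_acceptable(DCBX_HOST | DCBX_VER_CEE |
					       DCBX_VER_IEEE));               /* 0 */
	printf("%d\n", dcbx_mode_is_acceptable(DCBX_LLD_MANAGED));           /* 0 */
	return 0;
}

Compiled on its own, the three probes print 1, 0 and 0, matching the accept/reject outcome of the driver's check for those mode combinations.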
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2021 Intel Corporation. */ #include <linux/etherdevice.h> #include <linux/of_net.h> #include <linux/pci.h> #include <linux/bpf.h> #include <generated/utsrelease.h> #include <linux/crash_dump.h> /* Local includes */ #include "i40e.h" #include "i40e_diag.h" #include "i40e_xsk.h" #include <net/udp_tunnel.h> #include <net/xdp_sock_drv.h> /* All i40e tracepoints are defined by the include below, which * must be included exactly once across the whole kernel with * CREATE_TRACE_POINTS defined */ #define CREATE_TRACE_POINTS #include "i40e_trace.h" const char i40e_driver_name[] = "i40e"; static const char i40e_driver_string[] = "Intel(R) Ethernet Connection XL710 Network Driver"; static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation."; /* a bit of forward declarations */ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi); static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired); static int i40e_add_vsi(struct i40e_vsi *vsi); static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi); static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired); static int i40e_setup_misc_vector(struct i40e_pf *pf); static void i40e_determine_queue_usage(struct i40e_pf *pf); static int i40e_setup_pf_filter_control(struct i40e_pf *pf); static void i40e_prep_for_reset(struct i40e_pf *pf); static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired); static int i40e_reset(struct i40e_pf *pf); static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired); static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf); static int i40e_restore_interrupt_scheme(struct i40e_pf *pf); static bool i40e_check_recovery_mode(struct i40e_pf *pf); static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw); static void i40e_fdir_sb_setup(struct i40e_pf *pf); static int i40e_veb_get_bw_info(struct i40e_veb *veb); static int i40e_get_capabilities(struct i40e_pf *pf, enum i40e_admin_queue_opc list_type); static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf); /* i40e_pci_tbl - PCI Device ID Table * * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_BC), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0}, 
{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0}, /* required last entry */ {0, } }; MODULE_DEVICE_TABLE(pci, i40e_pci_tbl); #define I40E_MAX_VF_COUNT 128 static int debug = -1; module_param(debug, uint, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)"); MODULE_AUTHOR("Intel Corporation, <[email protected]>"); MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); MODULE_LICENSE("GPL v2"); static struct workqueue_struct *i40e_wq; static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f, struct net_device *netdev, int delta) { struct netdev_hw_addr *ha; if (!f || !netdev) return; netdev_for_each_mc_addr(ha, netdev) { if (ether_addr_equal(ha->addr, f->macaddr)) { ha->refcount += delta; if (ha->refcount <= 0) ha->refcount = 1; break; } } } /** * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested * @alignment: what to align the allocation to **/ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, u64 size, u32 alignment) { struct i40e_pf *pf = (struct i40e_pf *)hw->back; mem->size = ALIGN(size, alignment); mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, GFP_KERNEL); if (!mem->va) return -ENOMEM; return 0; } /** * i40e_free_dma_mem_d - OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) { struct i40e_pf *pf = (struct i40e_pf *)hw->back; dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa); mem->va = NULL; mem->pa = 0; mem->size = 0; return 0; } /** * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested **/ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size) { mem->size = size; mem->va = kzalloc(size, GFP_KERNEL); if (!mem->va) return -ENOMEM; return 0; } /** * i40e_free_virt_mem_d - OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem) { /* it's ok to kfree a NULL pointer */ kfree(mem->va); mem->va = NULL; mem->size = 0; return 0; } /** * i40e_get_lump - find a lump of free generic resource * @pf: board private structure * @pile: the pile of resource to search * @needed: the number of items needed * @id: an owner id to stick on the items assigned * * Returns the base item index of the lump, or negative for error **/ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, u16 needed, u16 id) { int ret = -ENOMEM; int i, j; if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { dev_info(&pf->pdev->dev, "param err: pile=%s needed=%d id=0x%04x\n", pile ? 
"<valid>" : "<null>", needed, id); return -EINVAL; } /* Allocate last queue in the pile for FDIR VSI queue * so it doesn't fragment the qp_pile */ if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) { if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) { dev_err(&pf->pdev->dev, "Cannot allocate queue %d for I40E_VSI_FDIR\n", pile->num_entries - 1); return -ENOMEM; } pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT; return pile->num_entries - 1; } i = 0; while (i < pile->num_entries) { /* skip already allocated entries */ if (pile->list[i] & I40E_PILE_VALID_BIT) { i++; continue; } /* do we have enough in this lump? */ for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) { if (pile->list[i+j] & I40E_PILE_VALID_BIT) break; } if (j == needed) { /* there was enough, so assign it to the requestor */ for (j = 0; j < needed; j++) pile->list[i+j] = id | I40E_PILE_VALID_BIT; ret = i; break; } /* not enough, so skip over it and continue looking */ i += j; } return ret; } /** * i40e_put_lump - return a lump of generic resource * @pile: the pile of resource to search * @index: the base item index * @id: the owner id of the items assigned * * Returns the count of items in the lump **/ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id) { int valid_id = (id | I40E_PILE_VALID_BIT); int count = 0; u16 i; if (!pile || index >= pile->num_entries) return -EINVAL; for (i = index; i < pile->num_entries && pile->list[i] == valid_id; i++) { pile->list[i] = 0; count++; } return count; } /** * i40e_find_vsi_from_id - searches for the vsi with the given id * @pf: the pf structure to search for the vsi * @id: id of the vsi it is searching for **/ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) { int i; for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i] && (pf->vsi[i]->id == id)) return pf->vsi[i]; return NULL; } /** * i40e_service_event_schedule - Schedule the service task to wake up * @pf: board private structure * * If not already scheduled, this puts the task into the work queue **/ void i40e_service_event_schedule(struct i40e_pf *pf) { if ((!test_bit(__I40E_DOWN, pf->state) && !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) || test_bit(__I40E_RECOVERY_MODE, pf->state)) queue_work(i40e_wq, &pf->service_task); } /** * i40e_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * @txqueue: queue number timing out * * If any port has noticed a Tx timeout, it is likely that the whole * device is munged, not just the one netdev port, so go for the full * reset. 
**/ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_ring *tx_ring = NULL; unsigned int i; u32 head, val; pf->tx_timeout_count++; /* with txqueue index, find the tx_ring struct */ for (i = 0; i < vsi->num_queue_pairs; i++) { if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { if (txqueue == vsi->tx_rings[i]->queue_index) { tx_ring = vsi->tx_rings[i]; break; } } } if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20))) pf->tx_timeout_recovery_level = 1; /* reset after some time */ else if (time_before(jiffies, (pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) return; /* don't do any new action before the next timeout */ /* don't kick off another recovery if one is already pending */ if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state)) return; if (tx_ring) { head = i40e_get_head(tx_ring); /* Read interrupt register */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) val = rd32(&pf->hw, I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx + tx_ring->vsi->base_vector - 1)); else val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n", vsi->seid, txqueue, tx_ring->next_to_clean, head, tx_ring->next_to_use, readl(tx_ring->tail), val); } pf->tx_timeout_last_recovery = jiffies; netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n", pf->tx_timeout_recovery_level, txqueue); switch (pf->tx_timeout_recovery_level) { case 1: set_bit(__I40E_PF_RESET_REQUESTED, pf->state); break; case 2: set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); break; case 3: set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); break; default: netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n"); set_bit(__I40E_DOWN_REQUESTED, pf->state); set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state); break; } i40e_service_event_schedule(pf); pf->tx_timeout_recovery_level++; } /** * i40e_get_vsi_stats_struct - Get System Network Statistics * @vsi: the VSI we care about * * Returns the address of the device statistics structure. * The statistics are actually updated from the service task. **/ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) { return &vsi->net_stats; } /** * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring * @ring: Tx ring to get statistics from * @stats: statistics entry to be updated **/ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring, struct rtnl_link_stats64 *stats) { u64 bytes, packets; unsigned int start; do { start = u64_stats_fetch_begin(&ring->syncp); packets = ring->stats.packets; bytes = ring->stats.bytes; } while (u64_stats_fetch_retry(&ring->syncp, start)); stats->tx_packets += packets; stats->tx_bytes += bytes; } /** * i40e_get_netdev_stats_struct - Get statistics for netdev interface * @netdev: network interface device structure * @stats: data structure to store statistics * * Returns the address of the device statistics structure. * The statistics are actually updated from the service task. 
**/ static void i40e_get_netdev_stats_struct(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); struct i40e_ring *ring; int i; if (test_bit(__I40E_VSI_DOWN, vsi->state)) return; if (!vsi->tx_rings) return; rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { u64 bytes, packets; unsigned int start; ring = READ_ONCE(vsi->tx_rings[i]); if (!ring) continue; i40e_get_netdev_stats_struct_tx(ring, stats); if (i40e_enabled_xdp_vsi(vsi)) { ring = READ_ONCE(vsi->xdp_rings[i]); if (!ring) continue; i40e_get_netdev_stats_struct_tx(ring, stats); } ring = READ_ONCE(vsi->rx_rings[i]); if (!ring) continue; do { start = u64_stats_fetch_begin(&ring->syncp); packets = ring->stats.packets; bytes = ring->stats.bytes; } while (u64_stats_fetch_retry(&ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; } rcu_read_unlock(); /* following stats updated by i40e_watchdog_subtask() */ stats->multicast = vsi_stats->multicast; stats->tx_errors = vsi_stats->tx_errors; stats->tx_dropped = vsi_stats->tx_dropped; stats->rx_errors = vsi_stats->rx_errors; stats->rx_dropped = vsi_stats->rx_dropped; stats->rx_crc_errors = vsi_stats->rx_crc_errors; stats->rx_length_errors = vsi_stats->rx_length_errors; } /** * i40e_vsi_reset_stats - Resets all stats of the given vsi * @vsi: the VSI to have its stats reset **/ void i40e_vsi_reset_stats(struct i40e_vsi *vsi) { struct rtnl_link_stats64 *ns; int i; if (!vsi) return; ns = i40e_get_vsi_stats_struct(vsi); memset(ns, 0, sizeof(*ns)); memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets)); memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats)); memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); if (vsi->rx_rings && vsi->rx_rings[0]) { for (i = 0; i < vsi->num_queue_pairs; i++) { memset(&vsi->rx_rings[i]->stats, 0, sizeof(vsi->rx_rings[i]->stats)); memset(&vsi->rx_rings[i]->rx_stats, 0, sizeof(vsi->rx_rings[i]->rx_stats)); memset(&vsi->tx_rings[i]->stats, 0, sizeof(vsi->tx_rings[i]->stats)); memset(&vsi->tx_rings[i]->tx_stats, 0, sizeof(vsi->tx_rings[i]->tx_stats)); } } vsi->stat_offsets_loaded = false; } /** * i40e_pf_reset_stats - Reset all of the stats for the given PF * @pf: the PF to be reset **/ void i40e_pf_reset_stats(struct i40e_pf *pf) { int i; memset(&pf->stats, 0, sizeof(pf->stats)); memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets)); pf->stat_offsets_loaded = false; for (i = 0; i < I40E_MAX_VEB; i++) { if (pf->veb[i]) { memset(&pf->veb[i]->stats, 0, sizeof(pf->veb[i]->stats)); memset(&pf->veb[i]->stats_offsets, 0, sizeof(pf->veb[i]->stats_offsets)); memset(&pf->veb[i]->tc_stats, 0, sizeof(pf->veb[i]->tc_stats)); memset(&pf->veb[i]->tc_stats_offsets, 0, sizeof(pf->veb[i]->tc_stats_offsets)); pf->veb[i]->stat_offsets_loaded = false; } } pf->hw_csum_rx_error = 0; } /** * i40e_compute_pci_to_hw_id - compute index form PCI function. * @vsi: ptr to the VSI to read from. * @hw: ptr to the hardware info. **/ static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw) { int pf_count = i40e_get_pf_count(hw); if (vsi->type == I40E_VSI_SRIOV) return (hw->port * BIT(7)) / pf_count + vsi->vf_id; return hw->port + BIT(7); } /** * i40e_stat_update64 - read and update a 64 bit stat from the chip. * @hw: ptr to the hardware info. * @hireg: the high 32 bit reg to read. * @loreg: the low 32 bit reg to read. 
* @offset_loaded: has the initial offset been loaded yet. * @offset: ptr to current offset value. * @stat: ptr to the stat. * * Since the device stats are not reset at PFReset, they will not * be zeroed when the driver starts. We'll save the first values read * and use them as offsets to be subtracted from the raw values in order * to report stats that count from zero. **/ static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg, bool offset_loaded, u64 *offset, u64 *stat) { u64 new_data; new_data = rd64(hw, loreg); if (!offset_loaded || new_data < *offset) *offset = new_data; *stat = new_data - *offset; } /** * i40e_stat_update48 - read and update a 48 bit stat from the chip * @hw: ptr to the hardware info * @hireg: the high 32 bit reg to read * @loreg: the low 32 bit reg to read * @offset_loaded: has the initial offset been loaded yet * @offset: ptr to current offset value * @stat: ptr to the stat * * Since the device stats are not reset at PFReset, they likely will not * be zeroed when the driver starts. We'll save the first values read * and use them as offsets to be subtracted from the raw values in order * to report stats that count from zero. In the process, we also manage * the potential roll-over. **/ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, bool offset_loaded, u64 *offset, u64 *stat) { u64 new_data; if (hw->device_id == I40E_DEV_ID_QEMU) { new_data = rd32(hw, loreg); new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; } else { new_data = rd64(hw, loreg); } if (!offset_loaded) *offset = new_data; if (likely(new_data >= *offset)) *stat = new_data - *offset; else *stat = (new_data + BIT_ULL(48)) - *offset; *stat &= 0xFFFFFFFFFFFFULL; } /** * i40e_stat_update32 - read and update a 32 bit stat from the chip * @hw: ptr to the hardware info * @reg: the hw reg to read * @offset_loaded: has the initial offset been loaded yet * @offset: ptr to current offset value * @stat: ptr to the stat **/ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg, bool offset_loaded, u64 *offset, u64 *stat) { u32 new_data; new_data = rd32(hw, reg); if (!offset_loaded) *offset = new_data; if (likely(new_data >= *offset)) *stat = (u32)(new_data - *offset); else *stat = (u32)((new_data + BIT_ULL(32)) - *offset); } /** * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat * @hw: ptr to the hardware info * @reg: the hw reg to read and clear * @stat: ptr to the stat **/ static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat) { u32 new_data = rd32(hw, reg); wr32(hw, reg, 1); /* must write a nonzero value to clear register */ *stat += new_data; } /** * i40e_stats_update_rx_discards - update rx_discards. * @vsi: ptr to the VSI to be updated. * @hw: ptr to the hardware info. * @stat_idx: VSI's stat_counter_idx. * @offset_loaded: ptr to the VSI's stat_offsets_loaded. * @stat_offset: ptr to stat_offset to store first read of specific register. * @stat: ptr to VSI's stat to be updated. 
**/ static void i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw, int stat_idx, bool offset_loaded, struct i40e_eth_stats *stat_offset, struct i40e_eth_stats *stat) { u64 rx_rdpc, rx_rxerr; i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded, &stat_offset->rx_discards, &rx_rdpc); i40e_stat_update64(hw, I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)), I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)), offset_loaded, &stat_offset->rx_discards_other, &rx_rxerr); stat->rx_discards = rx_rdpc + rx_rxerr; } /** * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters. * @vsi: the VSI to be updated **/ void i40e_update_eth_stats(struct i40e_vsi *vsi) { int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx); struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ es = &vsi->eth_stats; oes = &vsi->eth_stats_offsets; /* Gather up the stats that the hw collects */ i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx), vsi->stat_offsets_loaded, &oes->tx_errors, &es->tx_errors); i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), vsi->stat_offsets_loaded, &oes->rx_discards, &es->rx_discards); i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx), vsi->stat_offsets_loaded, &oes->rx_unknown_protocol, &es->rx_unknown_protocol); i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx), I40E_GLV_GORCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_bytes, &es->rx_bytes); i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), I40E_GLV_UPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_unicast, &es->rx_unicast); i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), I40E_GLV_MPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_multicast, &es->rx_multicast); i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), I40E_GLV_BPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_broadcast, &es->rx_broadcast); i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), I40E_GLV_GOTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_bytes, &es->tx_bytes); i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), I40E_GLV_UPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_unicast, &es->tx_unicast); i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), I40E_GLV_MPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_multicast, &es->tx_multicast); i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), I40E_GLV_BPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); i40e_stats_update_rx_discards(vsi, hw, stat_idx, vsi->stat_offsets_loaded, oes, es); vsi->stat_offsets_loaded = true; } /** * i40e_update_veb_stats - Update Switch component statistics * @veb: the VEB being updated **/ void i40e_update_veb_stats(struct i40e_veb *veb) { struct i40e_pf *pf = veb->pf; struct i40e_hw *hw = &pf->hw; struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ struct i40e_veb_tc_stats *veb_oes; struct i40e_veb_tc_stats *veb_es; int i, idx = 0; idx = veb->stats_idx; es = &veb->stats; oes = &veb->stats_offsets; veb_es = &veb->tc_stats; veb_oes = &veb->tc_stats_offsets; /* Gather up the stats that the hw collects */ i40e_stat_update32(hw, I40E_GLSW_TDPC(idx), veb->stat_offsets_loaded, &oes->tx_discards, &es->tx_discards); if (hw->revision_id > 0) i40e_stat_update32(hw, I40E_GLSW_RUPP(idx), veb->stat_offsets_loaded, &oes->rx_unknown_protocol, &es->rx_unknown_protocol); i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx), veb->stat_offsets_loaded, &oes->rx_bytes, &es->rx_bytes); i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), 
I40E_GLSW_UPRCL(idx), veb->stat_offsets_loaded, &oes->rx_unicast, &es->rx_unicast); i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx), veb->stat_offsets_loaded, &oes->rx_multicast, &es->rx_multicast); i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx), veb->stat_offsets_loaded, &oes->rx_broadcast, &es->rx_broadcast); i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx), veb->stat_offsets_loaded, &oes->tx_bytes, &es->tx_bytes); i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx), veb->stat_offsets_loaded, &oes->tx_unicast, &es->tx_unicast); i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx), veb->stat_offsets_loaded, &oes->tx_multicast, &es->tx_multicast); i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx), veb->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx), I40E_GLVEBTC_RPCL(i, idx), veb->stat_offsets_loaded, &veb_oes->tc_rx_packets[i], &veb_es->tc_rx_packets[i]); i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx), I40E_GLVEBTC_RBCL(i, idx), veb->stat_offsets_loaded, &veb_oes->tc_rx_bytes[i], &veb_es->tc_rx_bytes[i]); i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx), I40E_GLVEBTC_TPCL(i, idx), veb->stat_offsets_loaded, &veb_oes->tc_tx_packets[i], &veb_es->tc_tx_packets[i]); i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx), I40E_GLVEBTC_TBCL(i, idx), veb->stat_offsets_loaded, &veb_oes->tc_tx_bytes[i], &veb_es->tc_tx_bytes[i]); } veb->stat_offsets_loaded = true; } /** * i40e_update_vsi_stats - Update the vsi statistics counters. * @vsi: the VSI to be updated * * There are a few instances where we store the same stat in a * couple of different structs. This is partly because we have * the netdev stats that need to be filled out, which is slightly * different from the "eth_stats" defined by the chip and used in * VF communications. We sort it out here. 
**/ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) { u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy; struct i40e_pf *pf = vsi->back; struct rtnl_link_stats64 *ons; struct rtnl_link_stats64 *ns; /* netdev stats */ struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ u64 tx_restart, tx_busy; struct i40e_ring *p; u64 bytes, packets; unsigned int start; u64 tx_linearize; u64 tx_force_wb; u64 tx_stopped; u64 rx_p, rx_b; u64 tx_p, tx_b; u16 q; if (test_bit(__I40E_VSI_DOWN, vsi->state) || test_bit(__I40E_CONFIG_BUSY, pf->state)) return; ns = i40e_get_vsi_stats_struct(vsi); ons = &vsi->net_stats_offsets; es = &vsi->eth_stats; oes = &vsi->eth_stats_offsets; /* Gather up the netdev and vsi stats that the driver collects * on the fly during packet processing */ rx_b = rx_p = 0; tx_b = tx_p = 0; tx_restart = tx_busy = tx_linearize = tx_force_wb = 0; tx_stopped = 0; rx_page = 0; rx_buf = 0; rx_reuse = 0; rx_alloc = 0; rx_waive = 0; rx_busy = 0; rcu_read_lock(); for (q = 0; q < vsi->num_queue_pairs; q++) { /* locate Tx ring */ p = READ_ONCE(vsi->tx_rings[q]); if (!p) continue; do { start = u64_stats_fetch_begin(&p->syncp); packets = p->stats.packets; bytes = p->stats.bytes; } while (u64_stats_fetch_retry(&p->syncp, start)); tx_b += bytes; tx_p += packets; tx_restart += p->tx_stats.restart_queue; tx_busy += p->tx_stats.tx_busy; tx_linearize += p->tx_stats.tx_linearize; tx_force_wb += p->tx_stats.tx_force_wb; tx_stopped += p->tx_stats.tx_stopped; /* locate Rx ring */ p = READ_ONCE(vsi->rx_rings[q]); if (!p) continue; do { start = u64_stats_fetch_begin(&p->syncp); packets = p->stats.packets; bytes = p->stats.bytes; } while (u64_stats_fetch_retry(&p->syncp, start)); rx_b += bytes; rx_p += packets; rx_buf += p->rx_stats.alloc_buff_failed; rx_page += p->rx_stats.alloc_page_failed; rx_reuse += p->rx_stats.page_reuse_count; rx_alloc += p->rx_stats.page_alloc_count; rx_waive += p->rx_stats.page_waive_count; rx_busy += p->rx_stats.page_busy_count; if (i40e_enabled_xdp_vsi(vsi)) { /* locate XDP ring */ p = READ_ONCE(vsi->xdp_rings[q]); if (!p) continue; do { start = u64_stats_fetch_begin(&p->syncp); packets = p->stats.packets; bytes = p->stats.bytes; } while (u64_stats_fetch_retry(&p->syncp, start)); tx_b += bytes; tx_p += packets; tx_restart += p->tx_stats.restart_queue; tx_busy += p->tx_stats.tx_busy; tx_linearize += p->tx_stats.tx_linearize; tx_force_wb += p->tx_stats.tx_force_wb; } } rcu_read_unlock(); vsi->tx_restart = tx_restart; vsi->tx_busy = tx_busy; vsi->tx_linearize = tx_linearize; vsi->tx_force_wb = tx_force_wb; vsi->tx_stopped = tx_stopped; vsi->rx_page_failed = rx_page; vsi->rx_buf_failed = rx_buf; vsi->rx_page_reuse = rx_reuse; vsi->rx_page_alloc = rx_alloc; vsi->rx_page_waive = rx_waive; vsi->rx_page_busy = rx_busy; ns->rx_packets = rx_p; ns->rx_bytes = rx_b; ns->tx_packets = tx_p; ns->tx_bytes = tx_b; /* update netdev stats from eth stats */ i40e_update_eth_stats(vsi); ons->tx_errors = oes->tx_errors; ns->tx_errors = es->tx_errors; ons->multicast = oes->rx_multicast; ns->multicast = es->rx_multicast; ons->rx_dropped = oes->rx_discards; ns->rx_dropped = es->rx_discards; ons->tx_dropped = oes->tx_discards; ns->tx_dropped = es->tx_discards; /* pull in a couple PF stats if this is the main vsi */ if (vsi == pf->vsi[pf->lan_vsi]) { ns->rx_crc_errors = pf->stats.crc_errors; ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; ns->rx_length_errors = pf->stats.rx_length_errors; } } /** * i40e_update_pf_stats - Update the PF statistics counters. 
* @pf: the PF to be updated **/ static void i40e_update_pf_stats(struct i40e_pf *pf) { struct i40e_hw_port_stats *osd = &pf->stats_offsets; struct i40e_hw_port_stats *nsd = &pf->stats; struct i40e_hw *hw = &pf->hw; u32 val; int i; i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), I40E_GLPRT_GORCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_bytes, &nsd->eth.rx_bytes); i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), I40E_GLPRT_GOTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_bytes, &nsd->eth.tx_bytes); i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_discards, &nsd->eth.rx_discards); i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), I40E_GLPRT_UPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_unicast, &nsd->eth.rx_unicast); i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), I40E_GLPRT_MPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_multicast, &nsd->eth.rx_multicast); i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), I40E_GLPRT_BPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_broadcast, &nsd->eth.rx_broadcast); i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), I40E_GLPRT_UPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_unicast, &nsd->eth.tx_unicast); i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), I40E_GLPRT_MPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_multicast, &nsd->eth.tx_multicast); i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), I40E_GLPRT_BPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_broadcast, &nsd->eth.tx_broadcast); i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), pf->stat_offsets_loaded, &osd->tx_dropped_link_down, &nsd->tx_dropped_link_down); i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), pf->stat_offsets_loaded, &osd->crc_errors, &nsd->crc_errors); i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), pf->stat_offsets_loaded, &osd->illegal_bytes, &nsd->illegal_bytes); i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), pf->stat_offsets_loaded, &osd->mac_local_faults, &nsd->mac_local_faults); i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), pf->stat_offsets_loaded, &osd->mac_remote_faults, &nsd->mac_remote_faults); i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), pf->stat_offsets_loaded, &osd->rx_length_errors, &nsd->rx_length_errors); i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_rx, &nsd->link_xon_rx); i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_tx, &nsd->link_xon_tx); i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_rx, &nsd->link_xoff_rx); i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_tx, &nsd->link_xoff_tx); for (i = 0; i < 8; i++) { i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xoff_rx[i], &nsd->priority_xoff_rx[i]); i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xon_rx[i], &nsd->priority_xon_rx[i]); i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xon_tx[i], &nsd->priority_xon_tx[i]); i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xoff_tx[i], &nsd->priority_xoff_tx[i]); i40e_stat_update32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xon_2_xoff[i], &nsd->priority_xon_2_xoff[i]); } i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), 
I40E_GLPRT_PRC64L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_64, &nsd->rx_size_64); i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), I40E_GLPRT_PRC127L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_127, &nsd->rx_size_127); i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), I40E_GLPRT_PRC255L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_255, &nsd->rx_size_255); i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), I40E_GLPRT_PRC511L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_511, &nsd->rx_size_511); i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), I40E_GLPRT_PRC1023L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1023, &nsd->rx_size_1023); i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), I40E_GLPRT_PRC1522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1522, &nsd->rx_size_1522); i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), I40E_GLPRT_PRC9522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_big, &nsd->rx_size_big); i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), I40E_GLPRT_PTC64L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_64, &nsd->tx_size_64); i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), I40E_GLPRT_PTC127L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_127, &nsd->tx_size_127); i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), I40E_GLPRT_PTC255L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_255, &nsd->tx_size_255); i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), I40E_GLPRT_PTC511L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_511, &nsd->tx_size_511); i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), I40E_GLPRT_PTC1023L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1023, &nsd->tx_size_1023); i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), I40E_GLPRT_PTC1522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1522, &nsd->tx_size_1522); i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), I40E_GLPRT_PTC9522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_big, &nsd->tx_size_big); i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port), pf->stat_offsets_loaded, &osd->rx_undersize, &nsd->rx_undersize); i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port), pf->stat_offsets_loaded, &osd->rx_fragments, &nsd->rx_fragments); i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port), pf->stat_offsets_loaded, &osd->rx_oversize, &nsd->rx_oversize); i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port), pf->stat_offsets_loaded, &osd->rx_jabber, &nsd->rx_jabber); /* FDIR stats */ i40e_stat_update_and_clear32(hw, I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)), &nsd->fd_atr_match); i40e_stat_update_and_clear32(hw, I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)), &nsd->fd_sb_match); i40e_stat_update_and_clear32(hw, I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)), &nsd->fd_atr_tunnel_match); val = rd32(hw, I40E_PRTPM_EEE_STAT); nsd->tx_lpi_status = (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >> I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT; nsd->rx_lpi_status = (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >> I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT; i40e_stat_update32(hw, I40E_PRTPM_TLPIC, pf->stat_offsets_loaded, &osd->tx_lpi_count, &nsd->tx_lpi_count); i40e_stat_update32(hw, I40E_PRTPM_RLPIC, pf->stat_offsets_loaded, &osd->rx_lpi_count, &nsd->rx_lpi_count); if (pf->flags & I40E_FLAG_FD_SB_ENABLED && !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) nsd->fd_sb_status = true; else nsd->fd_sb_status = false; if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) nsd->fd_atr_status = 
true; else nsd->fd_atr_status = false; pf->stat_offsets_loaded = true; } /** * i40e_update_stats - Update the various statistics counters. * @vsi: the VSI to be updated * * Update the various stats for this VSI and its related entities. **/ void i40e_update_stats(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; if (vsi == pf->vsi[pf->lan_vsi]) i40e_update_pf_stats(pf); i40e_update_vsi_stats(vsi); } /** * i40e_count_filters - counts VSI mac filters * @vsi: the VSI to be searched * * Returns count of mac filters **/ int i40e_count_filters(struct i40e_vsi *vsi) { struct i40e_mac_filter *f; struct hlist_node *h; int bkt; int cnt = 0; hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) ++cnt; return cnt; } /** * i40e_find_filter - Search VSI filter list for specific mac/vlan filter * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan * * Returns ptr to the filter object or NULL **/ static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan) { struct i40e_mac_filter *f; u64 key; if (!vsi || !macaddr) return NULL; key = i40e_addr_to_hkey(macaddr); hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) { if ((ether_addr_equal(macaddr, f->macaddr)) && (vlan == f->vlan)) return f; } return NULL; } /** * i40e_find_mac - Find a mac addr in the macvlan filters list * @vsi: the VSI to be searched * @macaddr: the MAC address we are searching for * * Returns the first filter with the provided MAC address or NULL if * MAC address was not found **/ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr) { struct i40e_mac_filter *f; u64 key; if (!vsi || !macaddr) return NULL; key = i40e_addr_to_hkey(macaddr); hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) { if ((ether_addr_equal(macaddr, f->macaddr))) return f; } return NULL; } /** * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode * @vsi: the VSI to be searched * * Returns true if VSI is in vlan mode or false otherwise **/ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi) { /* If we have a PVID, always operate in VLAN mode */ if (vsi->info.pvid) return true; /* We need to operate in VLAN mode whenever we have any filters with * a VLAN other than I40E_VLAN_ALL. We could check the table each * time, incurring search cost repeatedly. However, we can notice two * things: * * 1) the only place where we can gain a VLAN filter is in * i40e_add_filter. * * 2) the only place where filters are actually removed is in * i40e_sync_filters_subtask. * * Thus, we can simply use a boolean value, has_vlan_filters which we * will set to true when we add a VLAN filter in i40e_add_filter. Then * we have to perform the full search after deleting filters in * i40e_sync_filters_subtask, but we already have to search * filters here and can perform the check at the same time. This * results in avoiding embedding a loop for VLAN mode inside another * loop over all the filters, and should maintain correctness as noted * above. */ return vsi->has_vlan_filter; } /** * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary * @vsi: the VSI to configure * @tmp_add_list: list of filters ready to be added * @tmp_del_list: list of filters ready to be deleted * @vlan_filters: the number of active VLAN filters * * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they * behave as expected. 
If we have any active VLAN filters remaining or about * to be added then we need to update non-VLAN filters to be marked as VLAN=0 * so that they only match against untagged traffic. If we no longer have any * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1 * so that they match against both tagged and untagged traffic. In this way, * we ensure that we correctly receive the desired traffic. This ensures that * when we have an active VLAN we will receive only untagged traffic and * traffic matching active VLANs. If we have no active VLANs then we will * operate in non-VLAN mode and receive all traffic, tagged or untagged. * * Finally, in a similar fashion, this function also corrects filters when * there is an active PVID assigned to this VSI. * * In case of memory allocation failure return -ENOMEM. Otherwise, return 0. * * This function is only expected to be called from within * i40e_sync_vsi_filters. * * NOTE: This function expects to be called while under the * mac_filter_hash_lock */ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, struct hlist_head *tmp_add_list, struct hlist_head *tmp_del_list, int vlan_filters) { s16 pvid = le16_to_cpu(vsi->info.pvid); struct i40e_mac_filter *f, *add_head; struct i40e_new_mac_filter *new; struct hlist_node *h; int bkt, new_vlan; /* To determine if a particular filter needs to be replaced we * have the three following conditions: * * a) if we have a PVID assigned, then all filters which are * not marked as VLAN=PVID must be replaced with filters that * are. * b) otherwise, if we have any active VLANS, all filters * which are marked as VLAN=-1 must be replaced with * filters marked as VLAN=0 * c) finally, if we do not have any active VLANS, all filters * which are marked as VLAN=0 must be replaced with filters * marked as VLAN=-1 */ /* Update the filters about to be added in place */ hlist_for_each_entry(new, tmp_add_list, hlist) { if (pvid && new->f->vlan != pvid) new->f->vlan = pvid; else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY) new->f->vlan = 0; else if (!vlan_filters && new->f->vlan == 0) new->f->vlan = I40E_VLAN_ANY; } /* Update the remaining active filters */ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { /* Combine the checks for whether a filter needs to be changed * and then determine the new VLAN inside the if block, in * order to avoid duplicating code for adding the new filter * then deleting the old filter. 
*/ if ((pvid && f->vlan != pvid) || (vlan_filters && f->vlan == I40E_VLAN_ANY) || (!vlan_filters && f->vlan == 0)) { /* Determine the new vlan we will be adding */ if (pvid) new_vlan = pvid; else if (vlan_filters) new_vlan = 0; else new_vlan = I40E_VLAN_ANY; /* Create the new filter */ add_head = i40e_add_filter(vsi, f->macaddr, new_vlan); if (!add_head) return -ENOMEM; /* Create a temporary i40e_new_mac_filter */ new = kzalloc(sizeof(*new), GFP_ATOMIC); if (!new) return -ENOMEM; new->f = add_head; new->state = add_head->state; /* Add the new filter to the tmp list */ hlist_add_head(&new->hlist, tmp_add_list); /* Put the original filter into the delete list */ f->state = I40E_FILTER_REMOVE; hash_del(&f->hlist); hlist_add_head(&f->hlist, tmp_del_list); } } vsi->has_vlan_filter = !!vlan_filters; return 0; } /** * i40e_get_vf_new_vlan - Get new vlan id on a vf * @vsi: the vsi to configure * @new_mac: new mac filter to be added * @f: existing mac filter, replaced with new_mac->f if new_mac is not NULL * @vlan_filters: the number of active VLAN filters * @trusted: flag if the VF is trusted * * Get new VLAN id based on current VLAN filters, trust, PVID * and vf-vlan-prune-disable flag. * * Returns the value of the new vlan filter or * the old value if no new filter is needed. */ static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi, struct i40e_new_mac_filter *new_mac, struct i40e_mac_filter *f, int vlan_filters, bool trusted) { s16 pvid = le16_to_cpu(vsi->info.pvid); struct i40e_pf *pf = vsi->back; bool is_any; if (new_mac) f = new_mac->f; if (pvid && f->vlan != pvid) return pvid; is_any = (trusted || !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING)); if ((vlan_filters && f->vlan == I40E_VLAN_ANY) || (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) || (is_any && !vlan_filters && f->vlan == 0)) { if (is_any) return I40E_VLAN_ANY; else return 0; } return f->vlan; } /** * i40e_correct_vf_mac_vlan_filters - Correct non-VLAN VF filters if necessary * @vsi: the vsi to configure * @tmp_add_list: list of filters ready to be added * @tmp_del_list: list of filters ready to be deleted * @vlan_filters: the number of active VLAN filters * @trusted: flag if the VF is trusted * * Correct VF VLAN filters based on current VLAN filters, trust, PVID * and vf-vlan-prune-disable flag. * * In case of memory allocation failure return -ENOMEM. Otherwise, return 0. * * This function is only expected to be called from within * i40e_sync_vsi_filters. 
* * NOTE: This function expects to be called while under the * mac_filter_hash_lock */ static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi, struct hlist_head *tmp_add_list, struct hlist_head *tmp_del_list, int vlan_filters, bool trusted) { struct i40e_mac_filter *f, *add_head; struct i40e_new_mac_filter *new_mac; struct hlist_node *h; int bkt, new_vlan; hlist_for_each_entry(new_mac, tmp_add_list, hlist) { new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL, vlan_filters, trusted); } hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { new_vlan = i40e_get_vf_new_vlan(vsi, NULL, f, vlan_filters, trusted); if (new_vlan != f->vlan) { add_head = i40e_add_filter(vsi, f->macaddr, new_vlan); if (!add_head) return -ENOMEM; /* Create a temporary i40e_new_mac_filter */ new_mac = kzalloc(sizeof(*new_mac), GFP_ATOMIC); if (!new_mac) return -ENOMEM; new_mac->f = add_head; new_mac->state = add_head->state; /* Add the new filter to the tmp list */ hlist_add_head(&new_mac->hlist, tmp_add_list); /* Put the original filter into the delete list */ f->state = I40E_FILTER_REMOVE; hash_del(&f->hlist); hlist_add_head(&f->hlist, tmp_del_list); } } vsi->has_vlan_filter = !!vlan_filters; return 0; } /** * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM * @vsi: the PF Main VSI - inappropriate for any other VSI * @macaddr: the MAC address * * Remove whatever filter the firmware set up so the driver can manage * its own filtering intelligently. **/ static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) { struct i40e_aqc_remove_macvlan_element_data element; struct i40e_pf *pf = vsi->back; /* Only appropriate for the PF main VSI */ if (vsi->type != I40E_VSI_MAIN) return; memset(&element, 0, sizeof(element)); ether_addr_copy(element.mac_addr, macaddr); element.vlan_tag = 0; /* Ignore error returns, some firmware does it this way... */ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); memset(&element, 0, sizeof(element)); ether_addr_copy(element.mac_addr, macaddr); element.vlan_tag = 0; /* ...and some firmware does it this way. */ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); } /** * i40e_add_filter - Add a mac/vlan filter to the VSI * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan * * Returns ptr to the filter object or NULL when no memory available. * * NOTE: This function is expected to be called with mac_filter_hash_lock * being held. **/ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan) { struct i40e_mac_filter *f; u64 key; if (!vsi || !macaddr) return NULL; f = i40e_find_filter(vsi, macaddr, vlan); if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) return NULL; /* Update the boolean indicating if we need to function in * VLAN mode. */ if (vlan >= 0) vsi->has_vlan_filter = true; ether_addr_copy(f->macaddr, macaddr); f->vlan = vlan; f->state = I40E_FILTER_NEW; INIT_HLIST_NODE(&f->hlist); key = i40e_addr_to_hkey(macaddr); hash_add(vsi->mac_filter_hash, &f->hlist, key); vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); } /* If we're asked to add a filter that has been marked for removal, it * is safe to simply restore it to active state. 
__i40e_del_filter * will have simply deleted any filters which were previously marked * NEW or FAILED, so if it is currently marked REMOVE it must have * previously been ACTIVE. Since we haven't yet run the sync filters * task, just restore this filter to the ACTIVE state so that the * sync task leaves it in place */ if (f->state == I40E_FILTER_REMOVE) f->state = I40E_FILTER_ACTIVE; return f; } /** * __i40e_del_filter - Remove a specific filter from the VSI * @vsi: VSI to remove from * @f: the filter to remove from the list * * This function should be called instead of i40e_del_filter only if you know * the exact filter you will remove already, such as via i40e_find_filter or * i40e_find_mac. * * NOTE: This function is expected to be called with mac_filter_hash_lock * being held. * ANOTHER NOTE: This function MUST be called from within the context of * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() * instead of list_for_each_entry(). **/ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) { if (!f) return; /* If the filter was never added to firmware then we can just delete it * directly and we don't want to set the status to remove or else an * admin queue command will unnecessarily fire. */ if ((f->state == I40E_FILTER_FAILED) || (f->state == I40E_FILTER_NEW)) { hash_del(&f->hlist); kfree(f); } else { f->state = I40E_FILTER_REMOVE; } vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); } /** * i40e_del_filter - Remove a MAC/VLAN filter from the VSI * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the VLAN * * NOTE: This function is expected to be called with mac_filter_hash_lock * being held. * ANOTHER NOTE: This function MUST be called from within the context of * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() * instead of list_for_each_entry(). **/ void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan) { struct i40e_mac_filter *f; if (!vsi || !macaddr) return; f = i40e_find_filter(vsi, macaddr, vlan); __i40e_del_filter(vsi, f); } /** * i40e_add_mac_filter - Add a MAC filter for all active VLANs * @vsi: the VSI to be searched * @macaddr: the mac address to be filtered * * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise, * go through all the macvlan filters and add a macvlan filter for each * unique vlan that already exists. If a PVID has been assigned, instead only * add the macaddr to that VLAN. * * Returns last filter added on success, else NULL **/ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr) { struct i40e_mac_filter *f, *add = NULL; struct hlist_node *h; int bkt; if (vsi->info.pvid) return i40e_add_filter(vsi, macaddr, le16_to_cpu(vsi->info.pvid)); if (!i40e_is_vsi_in_vlan(vsi)) return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY); hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (f->state == I40E_FILTER_REMOVE) continue; add = i40e_add_filter(vsi, macaddr, f->vlan); if (!add) return NULL; } return add; } /** * i40e_del_mac_filter - Remove a MAC filter from all VLANs * @vsi: the VSI to be searched * @macaddr: the mac address to be removed * * Removes a given MAC address from a VSI regardless of what VLAN it has been * associated with. 
* * Returns 0 for success, or error **/ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr) { struct i40e_mac_filter *f; struct hlist_node *h; bool found = false; int bkt; lockdep_assert_held(&vsi->mac_filter_hash_lock); hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (ether_addr_equal(macaddr, f->macaddr)) { __i40e_del_filter(vsi, f); found = true; } } if (found) return 0; else return -ENOENT; } /** * i40e_set_mac - NDO callback to set mac address * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure **/ static int i40e_set_mac(struct net_device *netdev, void *p) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (test_bit(__I40E_DOWN, pf->state) || test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) return -EADDRNOTAVAIL; if (ether_addr_equal(hw->mac.addr, addr->sa_data)) netdev_info(netdev, "returning to hw mac address %pM\n", hw->mac.addr); else netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); /* Copy the address first, so that we avoid a possible race with * .set_rx_mode(). * - Remove old address from MAC filter * - Copy new address * - Add new address to MAC filter */ spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_del_mac_filter(vsi, netdev->dev_addr); eth_hw_addr_set(netdev, addr->sa_data); i40e_add_mac_filter(vsi, netdev->dev_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); if (vsi->type == I40E_VSI_MAIN) { int ret; ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, addr->sa_data, NULL); if (ret) netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n", ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } /* schedule our worker thread which will take care of * applying the new filter changes */ i40e_service_event_schedule(pf); return 0; } /** * i40e_config_rss_aq - Prepare for RSS using AQ commands * @vsi: vsi structure * @seed: RSS hash seed * @lut: pointer to lookup table of lut_size * @lut_size: size of the lookup table **/ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int ret = 0; if (seed) { struct i40e_aqc_get_set_rss_key_data *seed_dw = (struct i40e_aqc_get_set_rss_key_data *)seed; ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw); if (ret) { dev_info(&pf->pdev->dev, "Cannot set RSS key, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; } } if (lut) { bool pf_lut = vsi->type == I40E_VSI_MAIN; ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); if (ret) { dev_info(&pf->pdev->dev, "Cannot set RSS lut, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; } } return ret; } /** * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used * @vsi: VSI structure **/ static int i40e_vsi_config_rss(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; u8 seed[I40E_HKEY_ARRAY_SIZE]; u8 *lut; int ret; if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)) return 0; if (!vsi->rss_size) vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs); if (!vsi->rss_size) return -EINVAL; lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM; /* Use the user configured hash keys and lookup table if there is one, * otherwise 
use default */ if (vsi->rss_lut_user) memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); else i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); if (vsi->rss_hkey_user) memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); else netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); kfree(lut); return ret; } /** * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config * @vsi: the VSI being configured, * @ctxt: VSI context structure * @enabled_tc: number of traffic classes to enable * * Prepares VSI tc_config to have queue configurations based on MQPRIO options. **/ static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt, u8 enabled_tc) { u16 qcount = 0, max_qcount, qmap, sections = 0; int i, override_q, pow, num_qps, ret; u8 netdev_tc = 0, offset = 0; if (vsi->type != I40E_VSI_MAIN) return -EINVAL; sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; sections |= I40E_AQ_VSI_PROP_SCHED_VALID; vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc; vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; num_qps = vsi->mqprio_qopt.qopt.count[0]; /* find the next higher power-of-2 of num queue pairs */ pow = ilog2(num_qps); if (!is_power_of_2(num_qps)) pow++; qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); /* Setup queue offset/count for all TCs for given VSI */ max_qcount = vsi->mqprio_qopt.qopt.count[0]; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* See if the given TC is enabled for the given VSI */ if (vsi->tc_config.enabled_tc & BIT(i)) { offset = vsi->mqprio_qopt.qopt.offset[i]; qcount = vsi->mqprio_qopt.qopt.count[i]; if (qcount > max_qcount) max_qcount = qcount; vsi->tc_config.tc_info[i].qoffset = offset; vsi->tc_config.tc_info[i].qcount = qcount; vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; } else { /* TC is not enabled so set the offset to * default queue and allocate one queue * for the given TC. */ vsi->tc_config.tc_info[i].qoffset = 0; vsi->tc_config.tc_info[i].qcount = 1; vsi->tc_config.tc_info[i].netdev_tc = 0; } } /* Set actual Tx/Rx queue pairs */ vsi->num_queue_pairs = offset + qcount; /* Setup queue TC[0].qmap for given VSI context */ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); ctxt->info.valid_sections |= cpu_to_le16(sections); /* Reconfigure RSS for main VSI with max queue count */ vsi->rss_size = max_qcount; ret = i40e_vsi_config_rss(vsi); if (ret) { dev_info(&vsi->back->pdev->dev, "Failed to reconfig rss for num_queues (%u)\n", max_qcount); return ret; } vsi->reconfig_rss = true; dev_dbg(&vsi->back->pdev->dev, "Reconfigured rss with num_queues (%u)\n", max_qcount); /* Find queue count available for channel VSIs and starting offset * for channel VSIs */ override_q = vsi->mqprio_qopt.qopt.count[0]; if (override_q && override_q < vsi->num_queue_pairs) { vsi->cnt_q_avail = vsi->num_queue_pairs - override_q; vsi->next_base_queue = override_q; } return 0; } /** * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc * @vsi: the VSI being setup * @ctxt: VSI context structure * @enabled_tc: Enabled TCs bitmap * @is_add: True if called before Add VSI * * Setup VSI queue mapping for enabled traffic classes. 
**/ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt, u8 enabled_tc, bool is_add) { struct i40e_pf *pf = vsi->back; u16 num_tc_qps = 0; u16 sections = 0; u8 netdev_tc = 0; u16 numtc = 1; u16 qcount; u8 offset; u16 qmap; int i; sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; offset = 0; /* zero out queue mapping, it will get updated on the end of the function */ memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping)); if (vsi->type == I40E_VSI_MAIN) { /* This code helps add more queue to the VSI if we have * more cores than RSS can support, the higher cores will * be served by ATR or other filters. Furthermore, the * non-zero req_queue_pairs says that user requested a new * queue count via ethtool's set_channels, so use this * value for queues distribution across traffic classes * We need at least one queue pair for the interface * to be usable as we see in else statement. */ if (vsi->req_queue_pairs > 0) vsi->num_queue_pairs = vsi->req_queue_pairs; else if (pf->flags & I40E_FLAG_MSIX_ENABLED) vsi->num_queue_pairs = pf->num_lan_msix; else vsi->num_queue_pairs = 1; } /* Number of queues per enabled TC */ if (vsi->type == I40E_VSI_MAIN || (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0)) num_tc_qps = vsi->num_queue_pairs; else num_tc_qps = vsi->alloc_queue_pairs; if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { /* Find numtc from enabled TC bitmap */ for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) /* TC is enabled */ numtc++; } if (!numtc) { dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n"); numtc = 1; } num_tc_qps = num_tc_qps / numtc; num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf)); } vsi->tc_config.numtc = numtc; vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; /* Do not allow use more TC queue pairs than MSI-X vectors exist */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix); /* Setup queue offset/count for all TCs for given VSI */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* See if the given TC is enabled for the given VSI */ if (vsi->tc_config.enabled_tc & BIT(i)) { /* TC is enabled */ int pow, num_qps; switch (vsi->type) { case I40E_VSI_MAIN: if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) || vsi->tc_config.enabled_tc != 1) { qcount = min_t(int, pf->alloc_rss_size, num_tc_qps); break; } fallthrough; case I40E_VSI_FDIR: case I40E_VSI_SRIOV: case I40E_VSI_VMDQ2: default: qcount = num_tc_qps; WARN_ON(i != 0); break; } vsi->tc_config.tc_info[i].qoffset = offset; vsi->tc_config.tc_info[i].qcount = qcount; /* find the next higher power-of-2 of num queue pairs */ num_qps = qcount; pow = 0; while (num_qps && (BIT_ULL(pow) < qcount)) { pow++; num_qps >>= 1; } vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); offset += qcount; } else { /* TC is not enabled so set the offset to * default queue and allocate one queue * for the given TC. 
*/ vsi->tc_config.tc_info[i].qoffset = 0; vsi->tc_config.tc_info[i].qcount = 1; vsi->tc_config.tc_info[i].netdev_tc = 0; qmap = 0; } ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); } /* Do not change previously set num_queue_pairs for PFs and VFs*/ if ((vsi->type == I40E_VSI_MAIN && numtc != 1) || (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) || (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV)) vsi->num_queue_pairs = offset; /* Scheduler section valid can only be set for ADD VSI */ if (is_add) { sections |= I40E_AQ_VSI_PROP_SCHED_VALID; ctxt->info.up_enable_bits = enabled_tc; } if (vsi->type == I40E_VSI_SRIOV) { ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG); for (i = 0; i < vsi->num_queue_pairs; i++) ctxt->info.queue_mapping[i] = cpu_to_le16(vsi->base_queue + i); } else { ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); } ctxt->info.valid_sections |= cpu_to_le16(sections); } /** * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address * @netdev: the netdevice * @addr: address to add * * Called by __dev_(mc|uc)_sync when an address needs to be added. We call * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. */ static int i40e_addr_sync(struct net_device *netdev, const u8 *addr) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; if (i40e_add_mac_filter(vsi, addr)) return 0; else return -ENOMEM; } /** * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address * @netdev: the netdevice * @addr: address to add * * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. */ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; /* Under some circumstances, we might receive a request to delete * our own device address from our uc list. Because we store the * device address in the VSI's MAC/VLAN filter list, we need to ignore * such requests and not delete our device address from this list. */ if (ether_addr_equal(addr, netdev->dev_addr)) return 0; i40e_del_mac_filter(vsi, addr); return 0; } /** * i40e_set_rx_mode - NDO callback to set the netdev filters * @netdev: network interface device structure **/ static void i40e_set_rx_mode(struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; spin_lock_bh(&vsi->mac_filter_hash_lock); __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync); __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync); spin_unlock_bh(&vsi->mac_filter_hash_lock); /* check for other flag changes */ if (vsi->current_netdev_flags != vsi->netdev->flags) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); } } /** * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries * @vsi: Pointer to VSI struct * @from: Pointer to list which contains MAC filter entries - changes to * those entries needs to be undone. * * MAC filter entries from this list were slated for deletion. 
**/ static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi, struct hlist_head *from) { struct i40e_mac_filter *f; struct hlist_node *h; hlist_for_each_entry_safe(f, h, from, hlist) { u64 key = i40e_addr_to_hkey(f->macaddr); /* Move the element back into MAC filter list*/ hlist_del(&f->hlist); hash_add(vsi->mac_filter_hash, &f->hlist, key); } } /** * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries * @vsi: Pointer to vsi struct * @from: Pointer to list which contains MAC filter entries - changes to * those entries needs to be undone. * * MAC filter entries from this list were slated for addition. **/ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi, struct hlist_head *from) { struct i40e_new_mac_filter *new; struct hlist_node *h; hlist_for_each_entry_safe(new, h, from, hlist) { /* We can simply free the wrapper structure */ hlist_del(&new->hlist); netdev_hw_addr_refcnt(new->f, vsi->netdev, -1); kfree(new); } } /** * i40e_next_filter - Get the next non-broadcast filter from a list * @next: pointer to filter in list * * Returns the next non-broadcast filter in the list. Required so that we * ignore broadcast filters within the list, since these are not handled via * the normal firmware update path. */ static struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next) { hlist_for_each_entry_continue(next, hlist) { if (!is_broadcast_ether_addr(next->f->macaddr)) return next; } return NULL; } /** * i40e_update_filter_state - Update filter state based on return data * from firmware * @count: Number of filters added * @add_list: return data from fw * @add_head: pointer to first filter in current batch * * MAC filter entries from list were slated to be added to device. Returns * number of successful filters. Note that 0 does NOT mean success! **/ static int i40e_update_filter_state(int count, struct i40e_aqc_add_macvlan_element_data *add_list, struct i40e_new_mac_filter *add_head) { int retval = 0; int i; for (i = 0; i < count; i++) { /* Always check status of each filter. We don't need to check * the firmware return status because we pre-set the filter * status to I40E_AQC_MM_ERR_NO_RES when sending the filter * request to the adminq. Thus, if it no longer matches then * we know the filter is active. */ if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) { add_head->state = I40E_FILTER_FAILED; } else { add_head->state = I40E_FILTER_ACTIVE; retval++; } add_head = i40e_next_filter(add_head); if (!add_head) break; } return retval; } /** * i40e_aqc_del_filters - Request firmware to delete a set of filters * @vsi: ptr to the VSI * @vsi_name: name to display in messages * @list: the list of filters to send to firmware * @num_del: the number of filters to delete * @retval: Set to -EIO on failure to delete * * Send a request to firmware via AdminQ to delete a set of filters. Uses * *retval instead of a return value so that success does not force ret_val to * be set to 0. This ensures that a sequence of calls to this function * preserve the previous value of *retval on successful delete. 
*/ static void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name, struct i40e_aqc_remove_macvlan_element_data *list, int num_del, int *retval) { struct i40e_hw *hw = &vsi->back->hw; enum i40e_admin_queue_err aq_status; int aq_ret; aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL, &aq_status); /* Explicitly ignore and do not report when firmware returns ENOENT */ if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) { *retval = -EIO; dev_info(&vsi->back->pdev->dev, "ignoring delete macvlan error on %s, err %pe, aq_err %s\n", vsi_name, ERR_PTR(aq_ret), i40e_aq_str(hw, aq_status)); } } /** * i40e_aqc_add_filters - Request firmware to add a set of filters * @vsi: ptr to the VSI * @vsi_name: name to display in messages * @list: the list of filters to send to firmware * @add_head: Position in the add hlist * @num_add: the number of filters to add * * Send a request to firmware via AdminQ to add a chunk of filters. Will set * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of * space for more filters. */ static void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, struct i40e_aqc_add_macvlan_element_data *list, struct i40e_new_mac_filter *add_head, int num_add) { struct i40e_hw *hw = &vsi->back->hw; enum i40e_admin_queue_err aq_status; int fcnt; i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status); fcnt = i40e_update_filter_state(num_add, list, add_head); if (fcnt != num_add) { if (vsi->type == I40E_VSI_MAIN) { set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); dev_warn(&vsi->back->pdev->dev, "Error %s adding RX filters on %s, promiscuous mode forced on\n", i40e_aq_str(hw, aq_status), vsi_name); } else if (vsi->type == I40E_VSI_SRIOV || vsi->type == I40E_VSI_VMDQ1 || vsi->type == I40E_VSI_VMDQ2) { dev_warn(&vsi->back->pdev->dev, "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n", i40e_aq_str(hw, aq_status), vsi_name, vsi_name); } else { dev_warn(&vsi->back->pdev->dev, "Error %s adding RX filters on %s, incorrect VSI type: %i.\n", i40e_aq_str(hw, aq_status), vsi_name, vsi->type); } } } /** * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags * @vsi: pointer to the VSI * @vsi_name: the VSI name * @f: filter data * * This function sets or clears the promiscuous broadcast flags for VLAN * filters in order to properly receive broadcast frames. Assumes that only * broadcast filters are passed. * * Returns status indicating success or failure; **/ static int i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, struct i40e_mac_filter *f) { bool enable = f->state == I40E_FILTER_NEW; struct i40e_hw *hw = &vsi->back->hw; int aq_ret; if (f->vlan == I40E_VLAN_ANY) { aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, enable, NULL); } else { aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw, vsi->seid, enable, f->vlan, NULL); } if (aq_ret) { set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); dev_warn(&vsi->back->pdev->dev, "Error %s, forcing overflow promiscuous on %s\n", i40e_aq_str(hw, hw->aq.asq_last_status), vsi_name); } return aq_ret; } /** * i40e_set_promiscuous - set promiscuous mode * @pf: board private structure * @promisc: promisc on or off * * There are different ways of setting promiscuous mode on a PF depending on * what state/environment we're in. This identifies and sets it appropriately. * Returns 0 on success. 
**/ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; int aq_ret; if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB && !(pf->flags & I40E_FLAG_MFP_ENABLED)) { /* set defport ON for Main VSI instead of true promisc * this way we will get all unicast/multicast and VLAN * promisc behavior but will not get VF or VMDq traffic * replicated on the Main VSI. */ if (promisc) aq_ret = i40e_aq_set_default_vsi(hw, vsi->seid, NULL); else aq_ret = i40e_aq_clear_default_vsi(hw, vsi->seid, NULL); if (aq_ret) { dev_info(&pf->pdev->dev, "Set default VSI failed, err %pe, aq_err %s\n", ERR_PTR(aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } } else { aq_ret = i40e_aq_set_vsi_unicast_promiscuous( hw, vsi->seid, promisc, NULL, true); if (aq_ret) { dev_info(&pf->pdev->dev, "set unicast promisc failed, err %pe, aq_err %s\n", ERR_PTR(aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } aq_ret = i40e_aq_set_vsi_multicast_promiscuous( hw, vsi->seid, promisc, NULL); if (aq_ret) { dev_info(&pf->pdev->dev, "set multicast promisc failed, err %pe, aq_err %s\n", ERR_PTR(aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } } if (!aq_ret) pf->cur_promisc = promisc; return aq_ret; } /** * i40e_sync_vsi_filters - Update the VSI filter list to the HW * @vsi: ptr to the VSI * * Push any outstanding VSI filter changes through the AdminQ. * * Returns 0 or error value **/ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) { struct hlist_head tmp_add_list, tmp_del_list; struct i40e_mac_filter *f; struct i40e_new_mac_filter *new, *add_head = NULL; struct i40e_hw *hw = &vsi->back->hw; bool old_overflow, new_overflow; unsigned int failed_filters = 0; unsigned int vlan_filters = 0; char vsi_name[16] = "PF"; int filter_list_len = 0; u32 changed_flags = 0; struct hlist_node *h; struct i40e_pf *pf; int num_add = 0; int num_del = 0; int aq_ret = 0; int retval = 0; u16 cmd_flags; int list_size; int bkt; /* empty array typed pointers, kcalloc later */ struct i40e_aqc_add_macvlan_element_data *add_list; struct i40e_aqc_remove_macvlan_element_data *del_list; while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state)) usleep_range(1000, 2000); pf = vsi->back; old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); if (vsi->netdev) { changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; vsi->current_netdev_flags = vsi->netdev->flags; } INIT_HLIST_HEAD(&tmp_add_list); INIT_HLIST_HEAD(&tmp_del_list); if (vsi->type == I40E_VSI_SRIOV) snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id); else if (vsi->type != I40E_VSI_MAIN) snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid); if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; spin_lock_bh(&vsi->mac_filter_hash_lock); /* Create a list of filters to delete. */ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (f->state == I40E_FILTER_REMOVE) { /* Move the element into temporary del_list */ hash_del(&f->hlist); hlist_add_head(&f->hlist, &tmp_del_list); /* Avoid counting removed filters */ continue; } if (f->state == I40E_FILTER_NEW) { /* Create a temporary i40e_new_mac_filter */ new = kzalloc(sizeof(*new), GFP_ATOMIC); if (!new) goto err_no_memory_locked; /* Store pointer to the real filter */ new->f = f; new->state = f->state; /* Add it to the hash list */ hlist_add_head(&new->hlist, &tmp_add_list); } /* Count the number of active (current and new) VLAN * filters we have now. 
Does not count filters which * are marked for deletion. */ if (f->vlan > 0) vlan_filters++; } if (vsi->type != I40E_VSI_SRIOV) retval = i40e_correct_mac_vlan_filters (vsi, &tmp_add_list, &tmp_del_list, vlan_filters); else if (pf->vf) retval = i40e_correct_vf_mac_vlan_filters (vsi, &tmp_add_list, &tmp_del_list, vlan_filters, pf->vf[vsi->vf_id].trusted); hlist_for_each_entry(new, &tmp_add_list, hlist) netdev_hw_addr_refcnt(new->f, vsi->netdev, 1); if (retval) goto err_no_memory_locked; spin_unlock_bh(&vsi->mac_filter_hash_lock); } /* Now process 'del_list' outside the lock */ if (!hlist_empty(&tmp_del_list)) { filter_list_len = hw->aq.asq_buf_size / sizeof(struct i40e_aqc_remove_macvlan_element_data); list_size = filter_list_len * sizeof(struct i40e_aqc_remove_macvlan_element_data); del_list = kzalloc(list_size, GFP_ATOMIC); if (!del_list) goto err_no_memory; hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) { cmd_flags = 0; /* handle broadcast filters by updating the broadcast * promiscuous flag and release filter list. */ if (is_broadcast_ether_addr(f->macaddr)) { i40e_aqc_broadcast_filter(vsi, vsi_name, f); hlist_del(&f->hlist); kfree(f); continue; } /* add to delete list */ ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); if (f->vlan == I40E_VLAN_ANY) { del_list[num_del].vlan_tag = 0; cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; } else { del_list[num_del].vlan_tag = cpu_to_le16((u16)(f->vlan)); } cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; del_list[num_del].flags = cmd_flags; num_del++; /* flush a full buffer */ if (num_del == filter_list_len) { i40e_aqc_del_filters(vsi, vsi_name, del_list, num_del, &retval); memset(del_list, 0, list_size); num_del = 0; } /* Release memory for MAC filter entries which were * synced up with HW. */ hlist_del(&f->hlist); kfree(f); } if (num_del) { i40e_aqc_del_filters(vsi, vsi_name, del_list, num_del, &retval); } kfree(del_list); del_list = NULL; } if (!hlist_empty(&tmp_add_list)) { /* Do all the adds now. */ filter_list_len = hw->aq.asq_buf_size / sizeof(struct i40e_aqc_add_macvlan_element_data); list_size = filter_list_len * sizeof(struct i40e_aqc_add_macvlan_element_data); add_list = kzalloc(list_size, GFP_ATOMIC); if (!add_list) goto err_no_memory; num_add = 0; hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { /* handle broadcast filters by updating the broadcast * promiscuous flag instead of adding a MAC filter. */ if (is_broadcast_ether_addr(new->f->macaddr)) { if (i40e_aqc_broadcast_filter(vsi, vsi_name, new->f)) new->state = I40E_FILTER_FAILED; else new->state = I40E_FILTER_ACTIVE; continue; } /* add to add array */ if (num_add == 0) add_head = new; cmd_flags = 0; ether_addr_copy(add_list[num_add].mac_addr, new->f->macaddr); if (new->f->vlan == I40E_VLAN_ANY) { add_list[num_add].vlan_tag = 0; cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; } else { add_list[num_add].vlan_tag = cpu_to_le16((u16)(new->f->vlan)); } add_list[num_add].queue_number = 0; /* set invalid match method for later detection */ add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES; cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; add_list[num_add].flags = cpu_to_le16(cmd_flags); num_add++; /* flush a full buffer */ if (num_add == filter_list_len) { i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head, num_add); memset(add_list, 0, list_size); num_add = 0; } } if (num_add) { i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head, num_add); } /* Now move all of the filters from the temp add list back to * the VSI's list. 
*/ spin_lock_bh(&vsi->mac_filter_hash_lock); hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { /* Only update the state if we're still NEW */ if (new->f->state == I40E_FILTER_NEW) new->f->state = new->state; hlist_del(&new->hlist); netdev_hw_addr_refcnt(new->f, vsi->netdev, -1); kfree(new); } spin_unlock_bh(&vsi->mac_filter_hash_lock); kfree(add_list); add_list = NULL; } /* Determine the number of active and failed filters. */ spin_lock_bh(&vsi->mac_filter_hash_lock); vsi->active_filters = 0; hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { if (f->state == I40E_FILTER_ACTIVE) vsi->active_filters++; else if (f->state == I40E_FILTER_FAILED) failed_filters++; } spin_unlock_bh(&vsi->mac_filter_hash_lock); /* Check if we are able to exit overflow promiscuous mode. We can * safely exit if we didn't just enter, we no longer have any failed * filters, and we have reduced filters below the threshold value. */ if (old_overflow && !failed_filters && vsi->active_filters < vsi->promisc_threshold) { dev_info(&pf->pdev->dev, "filter logjam cleared on %s, leaving overflow promiscuous mode\n", vsi_name); clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); vsi->promisc_threshold = 0; } /* if the VF is not trusted do not do promisc */ if (vsi->type == I40E_VSI_SRIOV && pf->vf && !pf->vf[vsi->vf_id].trusted) { clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); goto out; } new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); /* If we are entering overflow promiscuous, we need to calculate a new * threshold for when we are safe to exit */ if (!old_overflow && new_overflow) vsi->promisc_threshold = (vsi->active_filters * 3) / 4; /* check for changes in promiscuous modes */ if (changed_flags & IFF_ALLMULTI) { bool cur_multipromisc; cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, vsi->seid, cur_multipromisc, NULL); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, hw->aq.asq_last_status); dev_info(&pf->pdev->dev, "set multi promisc failed on %s, err %pe aq_err %s\n", vsi_name, ERR_PTR(aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { dev_info(&pf->pdev->dev, "%s allmulti mode.\n", cur_multipromisc ? "entering" : "leaving"); } } if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) { bool cur_promisc; cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || new_overflow); aq_ret = i40e_set_promiscuous(pf, cur_promisc); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, hw->aq.asq_last_status); dev_info(&pf->pdev->dev, "Setting promiscuous %s failed on %s, err %pe aq_err %s\n", cur_promisc ? 
"on" : "off", vsi_name, ERR_PTR(aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } } out: /* if something went wrong then set the changed flag so we try again */ if (retval) vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); return retval; err_no_memory: /* Restore elements on the temporary add and delete lists */ spin_lock_bh(&vsi->mac_filter_hash_lock); err_no_memory_locked: i40e_undo_del_filter_entries(vsi, &tmp_del_list); i40e_undo_add_filter_entries(vsi, &tmp_add_list); spin_unlock_bh(&vsi->mac_filter_hash_lock); vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); return -ENOMEM; } /** * i40e_sync_filters_subtask - Sync the VSI filter list with HW * @pf: board private structure **/ static void i40e_sync_filters_subtask(struct i40e_pf *pf) { int v; if (!pf) return; if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) return; if (test_bit(__I40E_VF_DISABLE, pf->state)) { set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); return; } for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) && !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) { int ret = i40e_sync_vsi_filters(pf->vsi[v]); if (ret) { /* come back and try again later */ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); break; } } } } /** * i40e_calculate_vsi_rx_buf_len - Calculates buffer length * * @vsi: VSI to calculate rx_buf_len from */ static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi) { if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) return SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048); return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048; } /** * i40e_max_vsi_frame_size - returns the maximum allowed frame size for VSI * @vsi: the vsi * @xdp_prog: XDP program **/ static int i40e_max_vsi_frame_size(struct i40e_vsi *vsi, struct bpf_prog *xdp_prog) { u16 rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi); u16 chain_len; if (xdp_prog && !xdp_prog->aux->xdp_has_frags) chain_len = 1; else chain_len = I40E_MAX_CHAINED_RX_BUFFERS; return min_t(u16, rx_buf_len * chain_len, I40E_MAX_RXBUFFER); } /** * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure **/ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; int frame_size; frame_size = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog); if (new_mtu > frame_size - I40E_PACKET_HDR_PAD) { netdev_err(netdev, "Error changing mtu to %d, Max is %d\n", new_mtu, frame_size - I40E_PACKET_HDR_PAD); return -EINVAL; } netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) i40e_vsi_reinit_locked(vsi); set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); return 0; } /** * i40e_ioctl - Access the hwtstamp interface * @netdev: network interface device structure * @ifr: interface request data * @cmd: ioctl command **/ int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; switch (cmd) { case SIOCGHWTSTAMP: return i40e_ptp_get_ts_config(pf, ifr); case SIOCSHWTSTAMP: return i40e_ptp_set_ts_config(pf, ifr); default: return -EOPNOTSUPP; 
} } /** * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI * @vsi: the vsi being adjusted **/ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) { struct i40e_vsi_context ctxt; int ret; /* Don't modify stripping options if a port VLAN is active */ if (vsi->info.pvid) return; if ((vsi->info.valid_sections & cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) return; /* already enabled */ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; ctxt.seid = vsi->seid; ctxt.info = vsi->info; ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "update vlan stripping failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&vsi->back->hw, vsi->back->hw.aq.asq_last_status)); } } /** * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI * @vsi: the vsi being adjusted **/ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) { struct i40e_vsi_context ctxt; int ret; /* Don't modify stripping options if a port VLAN is active */ if (vsi->info.pvid) return; if ((vsi->info.valid_sections & cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == I40E_AQ_VSI_PVLAN_EMOD_MASK)) return; /* already disabled */ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_NOTHING; ctxt.seid = vsi->seid; ctxt.info = vsi->info; ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "update vlan stripping failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&vsi->back->hw, vsi->back->hw.aq.asq_last_status)); } } /** * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address * @vsi: the vsi being configured * @vid: vlan id to be added (0 = untagged only , -1 = any) * * This is a helper function for adding a new MAC/VLAN filter with the * specified VLAN for each existing MAC address already in the hash table. * This function does *not* perform any accounting to update filters based on * VLAN mode. * * NOTE: this function expects to be called while under the * mac_filter_hash_lock **/ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) { struct i40e_mac_filter *f, *add_f; struct hlist_node *h; int bkt; hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { /* If we're asked to add a filter that has been marked for * removal, it is safe to simply restore it to active state. * __i40e_del_filter will have simply deleted any filters which * were previously marked NEW or FAILED, so if it is currently * marked REMOVE it must have previously been ACTIVE. Since we * haven't yet run the sync filters task, just restore this * filter to the ACTIVE state so that the sync task leaves it * in place. 
*/ if (f->state == I40E_FILTER_REMOVE && f->vlan == vid) { f->state = I40E_FILTER_ACTIVE; continue; } else if (f->state == I40E_FILTER_REMOVE) { continue; } add_f = i40e_add_filter(vsi, f->macaddr, vid); if (!add_f) { dev_info(&vsi->back->pdev->dev, "Could not add vlan filter %d for %pM\n", vid, f->macaddr); return -ENOMEM; } } return 0; } /** * i40e_vsi_add_vlan - Add VSI membership for given VLAN * @vsi: the VSI being configured * @vid: VLAN id to be added **/ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid) { int err; if (vsi->info.pvid) return -EINVAL; /* The network stack will attempt to add VID=0, with the intention to * receive priority tagged packets with a VLAN of 0. Our HW receives * these packets by default when configured to receive untagged * packets, so we don't need to add a filter for this case. * Additionally, HW interprets adding a VID=0 filter as meaning to * receive *only* tagged traffic and stops receiving untagged traffic. * Thus, we do not want to actually add a filter for VID=0 */ if (!vid) return 0; /* Locked once because all functions invoked below iterates list*/ spin_lock_bh(&vsi->mac_filter_hash_lock); err = i40e_add_vlan_all_mac(vsi, vid); spin_unlock_bh(&vsi->mac_filter_hash_lock); if (err) return err; /* schedule our worker thread which will take care of * applying the new filter changes */ i40e_service_event_schedule(vsi->back); return 0; } /** * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN * @vsi: the vsi being configured * @vid: vlan id to be removed (0 = untagged only , -1 = any) * * This function should be used to remove all VLAN filters which match the * given VID. It does not schedule the service event and does not take the * mac_filter_hash_lock so it may be combined with other operations under * a single invocation of the mac_filter_hash_lock. 
* * NOTE: this function expects to be called while under the * mac_filter_hash_lock */ void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) { struct i40e_mac_filter *f; struct hlist_node *h; int bkt; hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (f->vlan == vid) __i40e_del_filter(vsi, f); } } /** * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN * @vsi: the VSI being configured * @vid: VLAN id to be removed **/ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid) { if (!vid || vsi->info.pvid) return; spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_rm_vlan_all_mac(vsi, vid); spin_unlock_bh(&vsi->mac_filter_hash_lock); /* schedule our worker thread which will take care of * applying the new filter changes */ i40e_service_event_schedule(vsi->back); } /** * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload * @netdev: network interface to be adjusted * @proto: unused protocol value * @vid: vlan id to be added * * net_device_ops implementation for adding vlan ids **/ static int i40e_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; int ret = 0; if (vid >= VLAN_N_VID) return -EINVAL; ret = i40e_vsi_add_vlan(vsi, vid); if (!ret) set_bit(vid, vsi->active_vlans); return ret; } /** * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path * @netdev: network interface to be adjusted * @proto: unused protocol value * @vid: vlan id to be added **/ static void i40e_vlan_rx_add_vid_up(struct net_device *netdev, __always_unused __be16 proto, u16 vid) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; if (vid >= VLAN_N_VID) return; set_bit(vid, vsi->active_vlans); } /** * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload * @netdev: network interface to be adjusted * @proto: unused protocol value * @vid: vlan id to be removed * * net_device_ops implementation for removing vlan ids **/ static int i40e_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; /* return code is ignored as there is nothing a user * can do about failure to remove and a log message was * already printed from the other function */ i40e_vsi_kill_vlan(vsi, vid); clear_bit(vid, vsi->active_vlans); return 0; } /** * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up * @vsi: the vsi being brought back up **/ static void i40e_restore_vlan(struct i40e_vsi *vsi) { u16 vid; if (!vsi->netdev) return; if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) i40e_vlan_stripping_enable(vsi); else i40e_vlan_stripping_disable(vsi); for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q), vid); } /** * i40e_vsi_add_pvid - Add pvid for the VSI * @vsi: the vsi being adjusted * @vid: the vlan id to set as a PVID **/ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) { struct i40e_vsi_context ctxt; int ret; vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.pvid = cpu_to_le16(vid); vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | I40E_AQ_VSI_PVLAN_INSERT_PVID | I40E_AQ_VSI_PVLAN_EMOD_STR; ctxt.seid = vsi->seid; ctxt.info = vsi->info; ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "add pvid failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&vsi->back->hw, 
vsi->back->hw.aq.asq_last_status)); return -ENOENT; } return 0; } /** * i40e_vsi_remove_pvid - Remove the pvid from the VSI * @vsi: the vsi being adjusted * * Just use the vlan_rx_register() service to put it back to normal **/ void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) { vsi->info.pvid = 0; i40e_vlan_stripping_disable(vsi); } /** * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources * @vsi: ptr to the VSI * * If this function returns with an error, then it's possible one or * more of the rings is populated (while the rest are not). It is the * callers duty to clean those orphaned rings. * * Return 0 on success, negative on failure **/ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) { int i, err = 0; for (i = 0; i < vsi->num_queue_pairs && !err; i++) err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); if (!i40e_enabled_xdp_vsi(vsi)) return err; for (i = 0; i < vsi->num_queue_pairs && !err; i++) err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]); return err; } /** * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues * @vsi: ptr to the VSI * * Free VSI's transmit software resources **/ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi) { int i; if (vsi->tx_rings) { for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) i40e_free_tx_resources(vsi->tx_rings[i]); } if (vsi->xdp_rings) { for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) i40e_free_tx_resources(vsi->xdp_rings[i]); } } /** * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources * @vsi: ptr to the VSI * * If this function returns with an error, then it's possible one or * more of the rings is populated (while the rest are not). It is the * callers duty to clean those orphaned rings. * * Return 0 on success, negative on failure **/ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) { int i, err = 0; for (i = 0; i < vsi->num_queue_pairs && !err; i++) err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); return err; } /** * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues * @vsi: ptr to the VSI * * Free all receive software resources **/ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) { int i; if (!vsi->rx_rings) return; for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) i40e_free_rx_resources(vsi->rx_rings[i]); } /** * i40e_config_xps_tx_ring - Configure XPS for a Tx ring * @ring: The Tx ring to configure * * This enables/disables XPS for a given Tx descriptor ring * based on the TCs enabled for the VSI that ring belongs to. **/ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) { int cpu; if (!ring->q_vector || !ring->netdev || ring->ch) return; /* We only initialize XPS once, so as not to overwrite user settings */ if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) return; cpu = cpumask_local_spread(ring->q_vector->v_idx, -1); netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), ring->queue_index); } /** * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled * @ring: The Tx or Rx ring * * Returns the AF_XDP buffer pool or NULL. 
**/ static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring) { bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); int qid = ring->queue_index; if (ring_is_xdp(ring)) qid -= ring->vsi->alloc_queue_pairs; if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps)) return NULL; return xsk_get_pool_from_qid(ring->vsi->netdev, qid); } /** * i40e_configure_tx_ring - Configure a transmit ring context and rest * @ring: The Tx ring to configure * * Configure the Tx descriptor ring in the HMC context. **/ static int i40e_configure_tx_ring(struct i40e_ring *ring) { struct i40e_vsi *vsi = ring->vsi; u16 pf_q = vsi->base_queue + ring->queue_index; struct i40e_hw *hw = &vsi->back->hw; struct i40e_hmc_obj_txq tx_ctx; u32 qtx_ctl = 0; int err = 0; if (ring_is_xdp(ring)) ring->xsk_pool = i40e_xsk_pool(ring); /* some ATR related tx ring init */ if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { ring->atr_sample_rate = vsi->back->atr_sample_rate; ring->atr_count = 0; } else { ring->atr_sample_rate = 0; } /* configure XPS */ i40e_config_xps_tx_ring(ring); /* clear the context structure first */ memset(&tx_ctx, 0, sizeof(tx_ctx)); tx_ctx.new_context = 1; tx_ctx.base = (ring->dma / 128); tx_ctx.qlen = ring->count; tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)); tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); /* FDIR VSI tx ring can still use RS bit and writebacks */ if (vsi->type != I40E_VSI_FDIR) tx_ctx.head_wb_ena = 1; tx_ctx.head_wb_addr = ring->dma + (ring->count * sizeof(struct i40e_tx_desc)); /* As part of VSI creation/update, FW allocates certain * Tx arbitration queue sets for each TC enabled for * the VSI. The FW returns the handles to these queue * sets as part of the response buffer to Add VSI, * Update VSI, etc. AQ commands. It is expected that * these queue set handles be associated with the Tx * queues by the driver as part of the TX queue context * initialization. This has to be done regardless of * DCB as by default everything is mapped to TC0. 
*/ if (ring->ch) tx_ctx.rdylist = le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]); else tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); tx_ctx.rdylist_act = 0; /* clear the context in the HMC */ err = i40e_clear_lan_tx_queue_context(hw, pf_q); if (err) { dev_info(&vsi->back->pdev->dev, "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n", ring->queue_index, pf_q, err); return -ENOMEM; } /* set the context in the HMC */ err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx); if (err) { dev_info(&vsi->back->pdev->dev, "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n", ring->queue_index, pf_q, err); return -ENOMEM; } /* Now associate this queue with this PCI function */ if (ring->ch) { if (ring->ch->type == I40E_VSI_VMDQ2) qtx_ctl = I40E_QTX_CTL_VM_QUEUE; else return -EINVAL; qtx_ctl |= (ring->ch->vsi_number << I40E_QTX_CTL_VFVM_INDX_SHIFT) & I40E_QTX_CTL_VFVM_INDX_MASK; } else { if (vsi->type == I40E_VSI_VMDQ2) { qtx_ctl = I40E_QTX_CTL_VM_QUEUE; qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & I40E_QTX_CTL_VFVM_INDX_MASK; } else { qtx_ctl = I40E_QTX_CTL_PF_QUEUE; } } qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & I40E_QTX_CTL_PF_INDX_MASK); wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); i40e_flush(hw); /* cache tail off for easier writes later */ ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); return 0; } /** * i40e_rx_offset - Return expected offset into page to access data * @rx_ring: Ring we are requesting offset of * * Returns the offset value for ring into the data buffer. */ static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) { return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0; } /** * i40e_configure_rx_ring - Configure a receive ring context * @ring: The Rx ring to configure * * Configure the Rx descriptor ring in the HMC context. 
**/ static int i40e_configure_rx_ring(struct i40e_ring *ring) { struct i40e_vsi *vsi = ring->vsi; u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; u16 pf_q = vsi->base_queue + ring->queue_index; struct i40e_hw *hw = &vsi->back->hw; struct i40e_hmc_obj_rxq rx_ctx; int err = 0; bool ok; int ret; bitmap_zero(ring->state, __I40E_RING_STATE_NBITS); /* clear the context structure first */ memset(&rx_ctx, 0, sizeof(rx_ctx)); if (ring->vsi->type == I40E_VSI_MAIN) xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); ring->xsk_pool = i40e_xsk_pool(ring); if (ring->xsk_pool) { ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL); if (ret) return ret; dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", ring->queue_index); } else { ring->rx_buf_len = vsi->rx_buf_len; if (ring->vsi->type == I40E_VSI_MAIN) { ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL); if (ret) return ret; } } xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq); rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len, BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); rx_ctx.base = (ring->dma / 128); rx_ctx.qlen = ring->count; /* use 16 byte descriptors */ rx_ctx.dsize = 0; /* descriptor type is always zero * rx_ctx.dtype = 0; */ rx_ctx.hsplit_0 = 0; rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len); if (hw->revision_id == 0) rx_ctx.lrxqthresh = 0; else rx_ctx.lrxqthresh = 1; rx_ctx.crcstrip = 1; rx_ctx.l2tsel = 1; /* this controls whether VLAN is stripped from inner headers */ rx_ctx.showiv = 0; /* set the prefena field to 1 because the manual says to */ rx_ctx.prefena = 1; /* clear the context in the HMC */ err = i40e_clear_lan_rx_queue_context(hw, pf_q); if (err) { dev_info(&vsi->back->pdev->dev, "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", ring->queue_index, pf_q, err); return -ENOMEM; } /* set the context in the HMC */ err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); if (err) { dev_info(&vsi->back->pdev->dev, "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", ring->queue_index, pf_q, err); return -ENOMEM; } /* configure Rx buffer alignment */ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) { if (I40E_2K_TOO_SMALL_WITH_PADDING) { dev_info(&vsi->back->pdev->dev, "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n"); return -EOPNOTSUPP; } clear_ring_build_skb_enabled(ring); } else { set_ring_build_skb_enabled(ring); } ring->rx_offset = i40e_rx_offset(ring); /* cache tail for quicker writes, and clear the reg before use */ ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); if (ring->xsk_pool) { xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)); } else { ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); } if (!ok) { /* Log this in case the user has forgotten to give the kernel * any buffers, even later in the application. */ dev_info(&vsi->back->pdev->dev, "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n", ring->xsk_pool ? "AF_XDP ZC enabled " : "", ring->queue_index, pf_q); } return 0; } /** * i40e_vsi_configure_tx - Configure the VSI for Tx * @vsi: VSI structure describing this set of rings and resources * * Configure the Tx VSI for operation. 
**/ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) { int err = 0; u16 i; for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) err = i40e_configure_tx_ring(vsi->tx_rings[i]); if (err || !i40e_enabled_xdp_vsi(vsi)) return err; for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) err = i40e_configure_tx_ring(vsi->xdp_rings[i]); return err; } /** * i40e_vsi_configure_rx - Configure the VSI for Rx * @vsi: the VSI being configured * * Configure the Rx VSI for operation. **/ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) { int err = 0; u16 i; vsi->max_frame = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog); vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi); #if (PAGE_SIZE < 8192) if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING && vsi->netdev->mtu <= ETH_DATA_LEN) { vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; vsi->max_frame = vsi->rx_buf_len; } #endif /* set up individual rings */ for (i = 0; i < vsi->num_queue_pairs && !err; i++) err = i40e_configure_rx_ring(vsi->rx_rings[i]); return err; } /** * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC * @vsi: ptr to the VSI **/ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) { struct i40e_ring *tx_ring, *rx_ring; u16 qoffset, qcount; int i, n; if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { /* Reset the TC information */ for (i = 0; i < vsi->num_queue_pairs; i++) { rx_ring = vsi->rx_rings[i]; tx_ring = vsi->tx_rings[i]; rx_ring->dcb_tc = 0; tx_ring->dcb_tc = 0; } return; } for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) continue; qoffset = vsi->tc_config.tc_info[n].qoffset; qcount = vsi->tc_config.tc_info[n].qcount; for (i = qoffset; i < (qoffset + qcount); i++) { rx_ring = vsi->rx_rings[i]; tx_ring = vsi->tx_rings[i]; rx_ring->dcb_tc = n; tx_ring->dcb_tc = n; } } } /** * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI * @vsi: ptr to the VSI **/ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) { if (vsi->netdev) i40e_set_rx_mode(vsi->netdev); } /** * i40e_reset_fdir_filter_cnt - Reset flow director filter counters * @pf: Pointer to the targeted PF * * Set all flow director counters to 0. */ static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf) { pf->fd_tcp4_filter_cnt = 0; pf->fd_udp4_filter_cnt = 0; pf->fd_sctp4_filter_cnt = 0; pf->fd_ip4_filter_cnt = 0; pf->fd_tcp6_filter_cnt = 0; pf->fd_udp6_filter_cnt = 0; pf->fd_sctp6_filter_cnt = 0; pf->fd_ip6_filter_cnt = 0; } /** * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters * @vsi: Pointer to the targeted VSI * * This function replays the hlist on the hw where all the SB Flow Director * filters were saved. 
**/ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) { struct i40e_fdir_filter *filter; struct i40e_pf *pf = vsi->back; struct hlist_node *node; if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) return; /* Reset FDir counters as we're replaying all existing filters */ i40e_reset_fdir_filter_cnt(pf); hlist_for_each_entry_safe(filter, node, &pf->fdir_filter_list, fdir_node) { i40e_add_del_fdir(vsi, filter, true); } } /** * i40e_vsi_configure - Set up the VSI for action * @vsi: the VSI being configured **/ static int i40e_vsi_configure(struct i40e_vsi *vsi) { int err; i40e_set_vsi_rx_mode(vsi); i40e_restore_vlan(vsi); i40e_vsi_config_dcb_rings(vsi); err = i40e_vsi_configure_tx(vsi); if (!err) err = i40e_vsi_configure_rx(vsi); return err; } /** * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW * @vsi: the VSI being configured **/ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) { bool has_xdp = i40e_enabled_xdp_vsi(vsi); struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u16 vector; int i, q; u32 qp; /* The interrupt indexing is offset by 1 in the PFINT_ITRn * and PFINT_LNKLSTn registers, e.g.: * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) */ qp = vsi->base_queue; vector = vsi->base_vector; for (i = 0; i < vsi->num_q_vectors; i++, vector++) { struct i40e_q_vector *q_vector = vsi->q_vectors[i]; q_vector->rx.next_update = jiffies + 1; q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[i]->itr_setting); wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.target_itr >> 1); q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->tx.next_update = jiffies + 1; q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[i]->itr_setting); wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.target_itr >> 1); q_vector->tx.current_itr = q_vector->tx.target_itr; wr32(hw, I40E_PFINT_RATEN(vector - 1), i40e_intrl_usec_to_reg(vsi->int_rate_limit)); /* begin of linked list for RX queue assigned to this vector */ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); for (q = 0; q < q_vector->num_ringpairs; q++) { u32 nextqp = has_xdp ? 
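/* XDP Tx rings sit right after the regular queue pairs in the VSI, so
 * with XDP enabled the Rx interrupt cause chains first to its XDP Tx
 * twin at qp + alloc_queue_pairs; without XDP it chains straight to the
 * regular Tx queue at qp.
 */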
qp + vsi->alloc_queue_pairs : qp; u32 val; val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_RQCTL(qp), val); if (has_xdp) { /* TX queue with next queue set to TX */ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_TQCTL(nextqp), val); } /* TX queue with next RX or end of linked list */ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); /* Terminate the linked list */ if (q == (q_vector->num_ringpairs - 1)) val |= (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); wr32(hw, I40E_QINT_TQCTL(qp), val); qp++; } } i40e_flush(hw); } /** * i40e_enable_misc_int_causes - enable the non-queue interrupts * @pf: pointer to private device data structure **/ static void i40e_enable_misc_int_causes(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 val; /* clear things first */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ rd32(hw, I40E_PFINT_ICR0); /* read to clear */ val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | I40E_PFINT_ICR0_ENA_GRST_MASK | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | I40E_PFINT_ICR0_ENA_GPIO_MASK | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK; if (pf->flags & I40E_FLAG_IWARP_ENABLED) val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; if (pf->flags & I40E_FLAG_PTP) val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, val); /* SW_ITR_IDX = 0, but don't change INTENA */ wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); /* OTHER_ITR_IDX = 0 */ wr32(hw, I40E_PFINT_STAT_CTL0, 0); } /** * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW * @vsi: the VSI being configured **/ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) { u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? 
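/* Only queue pair 0 is used in MSI/legacy mode. With XDP enabled the Rx
 * cause chains through the XDP Tx ring at index alloc_queue_pairs first;
 * otherwise it points straight at Tx queue 0.
 */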
vsi->alloc_queue_pairs : 0; struct i40e_q_vector *q_vector = vsi->q_vectors[0]; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; /* set the ITR configuration */ q_vector->rx.next_update = jiffies + 1; q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting); wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1); q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->tx.next_update = jiffies + 1; q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting); wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1); q_vector->tx.current_itr = q_vector->tx.target_itr; i40e_enable_misc_int_causes(pf); /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ wr32(hw, I40E_PFINT_LNKLST0, 0); /* Associate the queue pair to the vector and enable the queue * interrupt RX queue in linked list with next queue set to TX */ wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX)); if (i40e_enabled_xdp_vsi(vsi)) { /* TX queue in linked list with next queue set to TX */ wr32(hw, I40E_QINT_TQCTL(nextqp), I40E_QINT_TQCTL_VAL(nextqp, 0, TX)); } /* last TX queue so the next RX queue doesn't matter */ wr32(hw, I40E_QINT_TQCTL(0), I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX)); i40e_flush(hw); } /** * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 * @pf: board private structure **/ void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; wr32(hw, I40E_PFINT_DYN_CTL0, I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); i40e_flush(hw); } /** * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 * @pf: board private structure **/ void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 val; val = I40E_PFINT_DYN_CTL0_INTENA_MASK | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTL0, val); i40e_flush(hw); } /** * i40e_msix_clean_rings - MSIX mode Interrupt Handler * @irq: interrupt number * @data: pointer to a q_vector **/ static irqreturn_t i40e_msix_clean_rings(int irq, void *data) { struct i40e_q_vector *q_vector = data; if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } /** * i40e_irq_affinity_notify - Callback for affinity changes * @notify: context as to what irq was changed * @mask: the new affinity mask * * This is a callback function used by the irq_set_affinity_notifier function * so that we may register to receive changes to the irq affinity masks. **/ static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) { struct i40e_q_vector *q_vector = container_of(notify, struct i40e_q_vector, affinity_notify); cpumask_copy(&q_vector->affinity_mask, mask); } /** * i40e_irq_affinity_release - Callback for affinity notifier release * @ref: internal core kernel usage * * This is a callback function used by the irq_set_affinity_notifier function * to inform the current notification subscriber that they will no longer * receive notifications. **/ static void i40e_irq_affinity_release(struct kref *ref) {} /** * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts * @vsi: the VSI being configured * @basename: name for the vector * * Allocates MSI-X vectors and requests interrupts from the kernel. 
**/ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) { int q_vectors = vsi->num_q_vectors; struct i40e_pf *pf = vsi->back; int base = vsi->base_vector; int rx_int_idx = 0; int tx_int_idx = 0; int vector, err; int irq_num; int cpu; for (vector = 0; vector < q_vectors; vector++) { struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; irq_num = pf->msix_entries[base + vector].vector; if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, "TxRx", rx_int_idx++); tx_int_idx++; } else if (q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, "rx", rx_int_idx++); } else if (q_vector->tx.ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, "tx", tx_int_idx++); } else { /* skip this unused q_vector */ continue; } err = request_irq(irq_num, vsi->irq_handler, 0, q_vector->name, q_vector); if (err) { dev_info(&pf->pdev->dev, "MSIX request_irq failed, error: %d\n", err); goto free_queue_irqs; } /* register for affinity change notifications */ q_vector->irq_num = irq_num; q_vector->affinity_notify.notify = i40e_irq_affinity_notify; q_vector->affinity_notify.release = i40e_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); /* Spread affinity hints out across online CPUs. * * get_cpu_mask returns a static constant mask with * a permanent lifetime so it's ok to pass to * irq_update_affinity_hint without making a copy. */ cpu = cpumask_local_spread(q_vector->v_idx, -1); irq_update_affinity_hint(irq_num, get_cpu_mask(cpu)); } vsi->irqs_ready = true; return 0; free_queue_irqs: while (vector) { vector--; irq_num = pf->msix_entries[base + vector].vector; irq_set_affinity_notifier(irq_num, NULL); irq_update_affinity_hint(irq_num, NULL); free_irq(irq_num, &vsi->q_vectors[vector]); } return err; } /** * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI * @vsi: the VSI being un-configured **/ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int base = vsi->base_vector; int i; /* disable interrupt causation from each queue */ for (i = 0; i < vsi->num_queue_pairs; i++) { u32 val; val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx)); val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val); if (!i40e_enabled_xdp_vsi(vsi)) continue; wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0); } /* disable each interrupt */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) { for (i = vsi->base_vector; i < (vsi->num_q_vectors + vsi->base_vector); i++) wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); i40e_flush(hw); for (i = 0; i < vsi->num_q_vectors; i++) synchronize_irq(pf->msix_entries[i + base].vector); } else { /* Legacy and MSI mode - this stops all interrupt handling */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); wr32(hw, I40E_PFINT_DYN_CTL0, 0); i40e_flush(hw); synchronize_irq(pf->pdev->irq); } } /** * i40e_vsi_enable_irq - Enable IRQ for the given VSI * @vsi: the VSI being configured **/ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int i; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { for (i = 0; i < vsi->num_q_vectors; i++) i40e_irq_dynamic_enable(vsi, i); } else { i40e_irq_dynamic_enable_icr0(pf); } 
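/* With MSI-X each queue vector was armed individually above; in MSI or
 * legacy interrupt mode only the "other causes" interrupt (ICR0) needs
 * enabling. i40e_flush() below performs a register read so the queued
 * interrupt-enable writes reach the hardware before returning.
 */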
i40e_flush(&pf->hw); return 0; } /** * i40e_free_misc_vector - Free the vector that handles non-queue events * @pf: board private structure **/ static void i40e_free_misc_vector(struct i40e_pf *pf) { /* Disable ICR 0 */ wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); i40e_flush(&pf->hw); if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) { free_irq(pf->msix_entries[0].vector, pf); clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); } } /** * i40e_intr - MSI/Legacy and non-queue interrupt handler * @irq: interrupt number * @data: pointer to a q_vector * * This is the handler used for all MSI/Legacy interrupts, and deals * with both queue and non-queue interrupts. This is also used in * MSIX mode to handle the non-queue interrupts. **/ static irqreturn_t i40e_intr(int irq, void *data) { struct i40e_pf *pf = (struct i40e_pf *)data; struct i40e_hw *hw = &pf->hw; irqreturn_t ret = IRQ_NONE; u32 icr0, icr0_remaining; u32 val, ena_mask; icr0 = rd32(hw, I40E_PFINT_ICR0); ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); /* if sharing a legacy IRQ, we might get called w/o an intr pending */ if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) goto enable_intr; /* if interrupt but no bits showing, must be SWINT */ if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) || (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) pf->sw_int_count++; if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) { ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n"); set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); } /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_q_vector *q_vector = vsi->q_vectors[0]; /* We do not have a way to disarm Queue causes while leaving * interrupt enabled for all other causes, ideally * interrupt should be disabled while we are in NAPI but * this is not a performance path and napi_schedule() * can deal with rescheduling. 
*/ if (!test_bit(__I40E_DOWN, pf->state)) napi_schedule_irqoff(&q_vector->napi); } if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); } if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; set_bit(__I40E_MDD_EVENT_PENDING, pf->state); } if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { /* disable any further VFLR event notifications */ if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) { u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg &= ~I40E_PFINT_ICR0_VFLR_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); } else { ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; set_bit(__I40E_VFLR_EVENT_PENDING, pf->state); } } if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) set_bit(__I40E_RESET_INTR_RECEIVED, pf->state); ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; val = rd32(hw, I40E_GLGEN_RSTAT); val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; if (val == I40E_RESET_CORER) { pf->corer_count++; } else if (val == I40E_RESET_GLOBR) { pf->globr_count++; } else if (val == I40E_RESET_EMPR) { pf->empr_count++; set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state); } } if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; dev_info(&pf->pdev->dev, "HMC error interrupt\n"); dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", rd32(hw, I40E_PFHMC_ERRORINFO), rd32(hw, I40E_PFHMC_ERRORDATA)); } if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK) schedule_work(&pf->ptp_extts0_work); if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) i40e_ptp_tx_hwtstamp(pf); icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; } /* If a critical error is pending we have no choice but to reset the * device. * Report and mask out any remaining unexpected interrupts. */ icr0_remaining = icr0 & ena_mask; if (icr0_remaining) { dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", icr0_remaining); if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { dev_info(&pf->pdev->dev, "device will be reset\n"); set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); } ena_mask &= ~icr0_remaining; } ret = IRQ_HANDLED; enable_intr: /* re-enable interrupt causes */ wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); if (!test_bit(__I40E_DOWN, pf->state) || test_bit(__I40E_RECOVERY_MODE, pf->state)) { i40e_service_event_schedule(pf); i40e_irq_dynamic_enable_icr0(pf); } return ret; } /** * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes * @tx_ring: tx ring to clean * @budget: how many cleans we're allowed * * Returns true if there's any budget left (e.g. 
the clean is finished) **/ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) { struct i40e_vsi *vsi = tx_ring->vsi; u16 i = tx_ring->next_to_clean; struct i40e_tx_buffer *tx_buf; struct i40e_tx_desc *tx_desc; tx_buf = &tx_ring->tx_bi[i]; tx_desc = I40E_TX_DESC(tx_ring, i); i -= tx_ring->count; do { struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; /* if next_to_watch is not set then there is no work pending */ if (!eop_desc) break; /* prevent any other reads prior to eop_desc */ smp_rmb(); /* if the descriptor isn't done, no work yet to do */ if (!(eop_desc->cmd_type_offset_bsz & cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) break; /* clear next_to_watch to prevent false hangs */ tx_buf->next_to_watch = NULL; tx_desc->buffer_addr = 0; tx_desc->cmd_type_offset_bsz = 0; /* move past filter desc */ tx_buf++; tx_desc++; i++; if (unlikely(!i)) { i -= tx_ring->count; tx_buf = tx_ring->tx_bi; tx_desc = I40E_TX_DESC(tx_ring, 0); } /* unmap skb header data */ dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buf, dma), dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) kfree(tx_buf->raw_buf); tx_buf->raw_buf = NULL; tx_buf->tx_flags = 0; tx_buf->next_to_watch = NULL; dma_unmap_len_set(tx_buf, len, 0); tx_desc->buffer_addr = 0; tx_desc->cmd_type_offset_bsz = 0; /* move us past the eop_desc for start of next FD desc */ tx_buf++; tx_desc++; i++; if (unlikely(!i)) { i -= tx_ring->count; tx_buf = tx_ring->tx_bi; tx_desc = I40E_TX_DESC(tx_ring, 0); } /* update budget accounting */ budget--; } while (likely(budget)); i += tx_ring->count; tx_ring->next_to_clean = i; if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); return budget > 0; } /** * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring * @irq: interrupt number * @data: pointer to a q_vector **/ static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) { struct i40e_q_vector *q_vector = data; struct i40e_vsi *vsi; if (!q_vector->tx.ring) return IRQ_HANDLED; vsi = q_vector->tx.ring->vsi; i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); return IRQ_HANDLED; } /** * i40e_map_vector_to_qp - Assigns the queue pair to the vector * @vsi: the VSI being configured * @v_idx: vector index * @qp_idx: queue pair index **/ static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) { struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; tx_ring->q_vector = q_vector; tx_ring->next = q_vector->tx.ring; q_vector->tx.ring = tx_ring; q_vector->tx.count++; /* Place XDP Tx ring in the same q_vector ring list as regular Tx */ if (i40e_enabled_xdp_vsi(vsi)) { struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx]; xdp_ring->q_vector = q_vector; xdp_ring->next = q_vector->tx.ring; q_vector->tx.ring = xdp_ring; q_vector->tx.count++; } rx_ring->q_vector = q_vector; rx_ring->next = q_vector->rx.ring; q_vector->rx.ring = rx_ring; q_vector->rx.count++; } /** * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors * @vsi: the VSI being configured * * This function maps descriptor rings to the queue-specific vectors * we were allotted through the MSI-X enabling code. Ideally, we'd have * one vector per queue pair, but on a constrained vector budget, we * group the queue pairs as "efficiently" as possible. 
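 *
 * Worked example (queue and vector counts are illustrative): with 8 queue
 * pairs spread over 3 vectors, DIV_ROUND_UP() assigns 3, 3 and 2 ring pairs
 * to vectors 0, 1 and 2 respectively, so the remainder lands on the earliest
 * vectors and no vector is left without rings.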
**/ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) { int qp_remaining = vsi->num_queue_pairs; int q_vectors = vsi->num_q_vectors; int num_ringpairs; int v_start = 0; int qp_idx = 0; /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to * group them so there are multiple queues per vector. * It is also important to go through all the vectors available to be * sure that if we don't use all the vectors, that the remaining vectors * are cleared. This is especially important when decreasing the * number of queues in use. */ for (; v_start < q_vectors; v_start++) { struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); q_vector->num_ringpairs = num_ringpairs; q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1; q_vector->rx.count = 0; q_vector->tx.count = 0; q_vector->rx.ring = NULL; q_vector->tx.ring = NULL; while (num_ringpairs--) { i40e_map_vector_to_qp(vsi, v_start, qp_idx); qp_idx++; qp_remaining--; } } } /** * i40e_vsi_request_irq - Request IRQ from the OS * @vsi: the VSI being configured * @basename: name for the vector **/ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) { struct i40e_pf *pf = vsi->back; int err; if (pf->flags & I40E_FLAG_MSIX_ENABLED) err = i40e_vsi_request_irq_msix(vsi, basename); else if (pf->flags & I40E_FLAG_MSI_ENABLED) err = request_irq(pf->pdev->irq, i40e_intr, 0, pf->int_name, pf); else err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, pf->int_name, pf); if (err) dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); return err; } #ifdef CONFIG_NET_POLL_CONTROLLER /** * i40e_netpoll - A Polling 'interrupt' handler * @netdev: network interface device structure * * This is used by netconsole to send skbs without having to re-enable * interrupts. It's not called while the normal interrupt routine is executing. **/ static void i40e_netpoll(struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; int i; /* if interface is down do nothing */ if (test_bit(__I40E_VSI_DOWN, vsi->state)) return; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { for (i = 0; i < vsi->num_q_vectors; i++) i40e_msix_clean_rings(0, vsi->q_vectors[i]); } else { i40e_intr(pf->pdev->irq, netdev); } } #endif #define I40E_QTX_ENA_WAIT_COUNT 50 /** * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled * @pf: the PF being configured * @pf_q: the PF queue * @enable: enable or disable state of the queue * * This routine will wait for the given Tx queue of the PF to reach the * enabled or disabled state. * Returns -ETIMEDOUT in case of failing to reach the requested state after * multiple retries; else will return 0 in case of success. **/ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) { int i; u32 tx_reg; for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) break; usleep_range(10, 20); } if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) return -ETIMEDOUT; return 0; } /** * i40e_control_tx_q - Start or stop a particular Tx queue * @pf: the PF structure * @pf_q: the PF queue to configure * @enable: start or stop the queue * * This function enables or disables a single queue. Note that any delay * required after the operation is expected to be handled by the caller of * this function. 
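 *
 * Usage sketch: this helper only requests the state change; callers that need
 * confirmation pair it with a wait, as i40e_control_wait_tx_q() in this file
 * does:
 *
 *	i40e_control_tx_q(pf, pf_q, enable);
 *	ret = i40e_pf_txq_wait(pf, pf_q, enable);
 *
 * where the wait polls I40E_QTX_ENA until QENA_STAT matches the request or
 * the retry limit is reached.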
**/ static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable) { struct i40e_hw *hw = &pf->hw; u32 tx_reg; int i; /* warn the TX unit of coming changes */ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); if (!enable) usleep_range(10, 20); for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) { tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) break; usleep_range(1000, 2000); } /* Skip if the queue is already in the requested state */ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) return; /* turn on/off the queue */ if (enable) { wr32(hw, I40E_QTX_HEAD(pf_q), 0); tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; } else { tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; } wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); } /** * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion * @seid: VSI SEID * @pf: the PF structure * @pf_q: the PF queue to configure * @is_xdp: true if the queue is used for XDP * @enable: start or stop the queue **/ int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, bool is_xdp, bool enable) { int ret; i40e_control_tx_q(pf, pf_q, enable); /* wait for the change to finish */ ret = i40e_pf_txq_wait(pf, pf_q, enable); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d %sTx ring %d %sable timeout\n", seid, (is_xdp ? "XDP " : ""), pf_q, (enable ? "en" : "dis")); } return ret; } /** * i40e_vsi_enable_tx - Start a VSI's rings * @vsi: the VSI being configured **/ static int i40e_vsi_enable_tx(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int i, pf_q, ret = 0; pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, false /*is xdp*/, true); if (ret) break; if (!i40e_enabled_xdp_vsi(vsi)) continue; ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q + vsi->alloc_queue_pairs, true /*is xdp*/, true); if (ret) break; } return ret; } /** * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled * @pf: the PF being configured * @pf_q: the PF queue * @enable: enable or disable state of the queue * * This routine will wait for the given Rx queue of the PF to reach the * enabled or disabled state. * Returns -ETIMEDOUT in case of failing to reach the requested state after * multiple retries; else will return 0 in case of success. **/ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) { int i; u32 rx_reg; for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q)); if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) break; usleep_range(10, 20); } if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) return -ETIMEDOUT; return 0; } /** * i40e_control_rx_q - Start or stop a particular Rx queue * @pf: the PF structure * @pf_q: the PF queue to configure * @enable: start or stop the queue * * This function enables or disables a single queue. Note that * any delay required after the operation is expected to be * handled by the caller of this function. 
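 *
 * Usage sketch (mirrors the Tx helper above): i40e_control_wait_rx_q() pairs
 * this call with i40e_pf_rxq_wait(), which polls I40E_QRX_ENA until the
 * QENA_STAT bit reflects the requested state or the retry limit is hit.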
**/ static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable) { struct i40e_hw *hw = &pf->hw; u32 rx_reg; int i; for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) { rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) == ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1)) break; usleep_range(1000, 2000); } /* Skip if the queue is already in the requested state */ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) return; /* turn on/off the queue */ if (enable) rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK; else rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); } /** * i40e_control_wait_rx_q * @pf: the PF structure * @pf_q: queue being configured * @enable: start or stop the rings * * This function enables or disables a single queue along with waiting * for the change to finish. The caller of this function should handle * the delays needed in the case of disabling queues. **/ int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable) { int ret = 0; i40e_control_rx_q(pf, pf_q, enable); /* wait for the change to finish */ ret = i40e_pf_rxq_wait(pf, pf_q, enable); if (ret) return ret; return ret; } /** * i40e_vsi_enable_rx - Start a VSI's rings * @vsi: the VSI being configured **/ static int i40e_vsi_enable_rx(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int i, pf_q, ret = 0; pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { ret = i40e_control_wait_rx_q(pf, pf_q, true); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d Rx ring %d enable timeout\n", vsi->seid, pf_q); break; } } return ret; } /** * i40e_vsi_start_rings - Start a VSI's rings * @vsi: the VSI being configured **/ int i40e_vsi_start_rings(struct i40e_vsi *vsi) { int ret = 0; /* do rx first for enable and last for disable */ ret = i40e_vsi_enable_rx(vsi); if (ret) return ret; ret = i40e_vsi_enable_tx(vsi); return ret; } #define I40E_DISABLE_TX_GAP_MSEC 50 /** * i40e_vsi_stop_rings - Stop a VSI's rings * @vsi: the VSI being configured **/ void i40e_vsi_stop_rings(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int pf_q, err, q_end; /* When port TX is suspended, don't wait */ if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) return i40e_vsi_stop_rings_no_wait(vsi); q_end = vsi->base_queue + vsi->num_queue_pairs; for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false); for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) { err = i40e_control_wait_rx_q(pf, pf_q, false); if (err) dev_info(&pf->pdev->dev, "VSI seid %d Rx ring %d disable timeout\n", vsi->seid, pf_q); } msleep(I40E_DISABLE_TX_GAP_MSEC); pf_q = vsi->base_queue; for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0); i40e_vsi_wait_queues_disabled(vsi); } /** * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay * @vsi: the VSI being shutdown * * This function stops all the rings for a VSI but does not delay to verify * that rings have been disabled. It is expected that the caller is shutting * down multiple VSIs at once and will delay together for all the VSIs after * initiating the shutdown. This is particularly useful for shutting down lots * of VFs together. Otherwise, a large delay can be incurred while configuring * each VSI in serial. 
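 *
 * Intended calling pattern (a sketch, not a specific caller in this file):
 * issue i40e_vsi_stop_rings_no_wait() for every affected VSI first, then do a
 * single combined settle/poll afterwards, e.g. i40e_vsi_wait_queues_disabled()
 * per VSI, instead of paying the per-queue wait inside each stop call.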
**/ void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int i, pf_q; pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { i40e_control_tx_q(pf, pf_q, false); i40e_control_rx_q(pf, pf_q, false); } } /** * i40e_vsi_free_irq - Free the irq association with the OS * @vsi: the VSI being configured **/ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int base = vsi->base_vector; u32 val, qp; int i; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { if (!vsi->q_vectors) return; if (!vsi->irqs_ready) return; vsi->irqs_ready = false; for (i = 0; i < vsi->num_q_vectors; i++) { int irq_num; u16 vector; vector = i + base; irq_num = pf->msix_entries[vector].vector; /* free only the irqs that were actually requested */ if (!vsi->q_vectors[i] || !vsi->q_vectors[i]->num_ringpairs) continue; /* clear the affinity notifier in the IRQ descriptor */ irq_set_affinity_notifier(irq_num, NULL); /* remove our suggested affinity mask for this IRQ */ irq_update_affinity_hint(irq_num, NULL); free_irq(irq_num, vsi->q_vectors[i]); /* Tear down the interrupt queue link list * * We know that they come in pairs and always * the Rx first, then the Tx. To clear the * link list, stick the EOL value into the * next_q field of the registers. */ val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; val |= I40E_QUEUE_END_OF_LIST << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); while (qp != I40E_QUEUE_END_OF_LIST) { u32 next; val = rd32(hw, I40E_QINT_RQCTL(qp)); val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | I40E_QINT_RQCTL_MSIX0_INDX_MASK | I40E_QINT_RQCTL_CAUSE_ENA_MASK | I40E_QINT_RQCTL_INTEVENT_MASK); val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | I40E_QINT_RQCTL_NEXTQ_INDX_MASK); wr32(hw, I40E_QINT_RQCTL(qp), val); val = rd32(hw, I40E_QINT_TQCTL(qp)); next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | I40E_QINT_TQCTL_MSIX0_INDX_MASK | I40E_QINT_TQCTL_CAUSE_ENA_MASK | I40E_QINT_TQCTL_INTEVENT_MASK); val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | I40E_QINT_TQCTL_NEXTQ_INDX_MASK); wr32(hw, I40E_QINT_TQCTL(qp), val); qp = next; } } } else { free_irq(pf->pdev->irq, pf); val = rd32(hw, I40E_PFINT_LNKLST0); qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; val |= I40E_QUEUE_END_OF_LIST << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; wr32(hw, I40E_PFINT_LNKLST0, val); val = rd32(hw, I40E_QINT_RQCTL(qp)); val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | I40E_QINT_RQCTL_MSIX0_INDX_MASK | I40E_QINT_RQCTL_CAUSE_ENA_MASK | I40E_QINT_RQCTL_INTEVENT_MASK); val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | I40E_QINT_RQCTL_NEXTQ_INDX_MASK); wr32(hw, I40E_QINT_RQCTL(qp), val); val = rd32(hw, I40E_QINT_TQCTL(qp)); val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | I40E_QINT_TQCTL_MSIX0_INDX_MASK | I40E_QINT_TQCTL_CAUSE_ENA_MASK | I40E_QINT_TQCTL_INTEVENT_MASK); val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | I40E_QINT_TQCTL_NEXTQ_INDX_MASK); wr32(hw, I40E_QINT_TQCTL(qp), val); } } /** * i40e_free_q_vector - Free memory allocated for specific interrupt vector * @vsi: the VSI being configured * @v_idx: Index of vector to be freed * * This function frees the memory allocated to the q_vector. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector. 
**/ static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) { struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; struct i40e_ring *ring; if (!q_vector) return; /* disassociate q_vector from rings */ i40e_for_each_ring(ring, q_vector->tx) ring->q_vector = NULL; i40e_for_each_ring(ring, q_vector->rx) ring->q_vector = NULL; /* only VSI w/ an associated netdev is set up w/ NAPI */ if (vsi->netdev) netif_napi_del(&q_vector->napi); vsi->q_vectors[v_idx] = NULL; kfree_rcu(q_vector, rcu); } /** * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors * @vsi: the VSI being un-configured * * This frees the memory allocated to the q_vectors and * deletes references to the NAPI struct. **/ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) { int v_idx; for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) i40e_free_q_vector(vsi, v_idx); } /** * i40e_reset_interrupt_capability - Disable interrupt setup in OS * @pf: board private structure **/ static void i40e_reset_interrupt_capability(struct i40e_pf *pf) { /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) { pci_disable_msix(pf->pdev); kfree(pf->msix_entries); pf->msix_entries = NULL; kfree(pf->irq_pile); pf->irq_pile = NULL; } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { pci_disable_msi(pf->pdev); } pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); } /** * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings * @pf: board private structure * * We go through and clear interrupt specific resources and reset the structure * to pre-load conditions **/ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) { int i; if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) i40e_free_misc_vector(pf); i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector, I40E_IWARP_IRQ_PILE_ID); i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i]) i40e_vsi_free_q_vectors(pf->vsi[i]); i40e_reset_interrupt_capability(pf); } /** * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI * @vsi: the VSI being configured **/ static void i40e_napi_enable_all(struct i40e_vsi *vsi) { int q_idx; if (!vsi->netdev) return; for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; if (q_vector->rx.ring || q_vector->tx.ring) napi_enable(&q_vector->napi); } } /** * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI * @vsi: the VSI being configured **/ static void i40e_napi_disable_all(struct i40e_vsi *vsi) { int q_idx; if (!vsi->netdev) return; for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; if (q_vector->rx.ring || q_vector->tx.ring) napi_disable(&q_vector->napi); } } /** * i40e_vsi_close - Shut down a VSI * @vsi: the vsi to be quelled **/ static void i40e_vsi_close(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state)) i40e_down(vsi); i40e_vsi_free_irq(vsi); i40e_vsi_free_tx_resources(vsi); i40e_vsi_free_rx_resources(vsi); vsi->current_netdev_flags = 0; set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) set_bit(__I40E_CLIENT_RESET, pf->state); } /** * i40e_quiesce_vsi - Pause a given VSI * @vsi: the VSI being paused **/ static void i40e_quiesce_vsi(struct i40e_vsi *vsi) { if (test_bit(__I40E_VSI_DOWN, vsi->state)) return; set_bit(__I40E_VSI_NEEDS_RESTART, 
vsi->state); if (vsi->netdev && netif_running(vsi->netdev)) vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); else i40e_vsi_close(vsi); } /** * i40e_unquiesce_vsi - Resume a given VSI * @vsi: the VSI being resumed **/ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) { if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state)) return; if (vsi->netdev && netif_running(vsi->netdev)) vsi->netdev->netdev_ops->ndo_open(vsi->netdev); else i40e_vsi_open(vsi); /* this clears the DOWN bit */ } /** * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF * @pf: the PF **/ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) { int v; for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v]) i40e_quiesce_vsi(pf->vsi[v]); } } /** * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF * @pf: the PF **/ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) { int v; for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v]) i40e_unquiesce_vsi(pf->vsi[v]); } } /** * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled * @vsi: the VSI being configured * * Wait until all queues on a given VSI have been disabled. **/ int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int i, pf_q, ret; pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { /* Check and wait for the Tx queue */ ret = i40e_pf_txq_wait(pf, pf_q, false); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d Tx ring %d disable timeout\n", vsi->seid, pf_q); return ret; } if (!i40e_enabled_xdp_vsi(vsi)) goto wait_rx; /* Check and wait for the XDP Tx queue */ ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs, false); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d XDP Tx ring %d disable timeout\n", vsi->seid, pf_q); return ret; } wait_rx: /* Check and wait for the Rx queue */ ret = i40e_pf_rxq_wait(pf, pf_q, false); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d Rx ring %d disable timeout\n", vsi->seid, pf_q); return ret; } } return 0; } #ifdef CONFIG_I40E_DCB /** * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled * @pf: the PF * * This function waits for the queues to be in disabled state for all the * VSIs that are managed by this PF. **/ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) { int v, ret = 0; for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { if (pf->vsi[v]) { ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); if (ret) break; } } return ret; } #endif /** * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP * @pf: pointer to PF * * Get TC map for ISCSI PF type that will include iSCSI TC * and LAN TC. 
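 *
 * Worked example (the priority-to-TC mapping is illustrative): if the DCBX
 * APP table carries an iSCSI entry whose priority maps to TC1 in the ETS
 * priority table, the returned bitmap is 0x3, i.e. TC0 (always set) plus
 * BIT(1).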
**/ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) { struct i40e_dcb_app_priority_table app; struct i40e_hw *hw = &pf->hw; u8 enabled_tc = 1; /* TC0 is always enabled */ u8 tc, i; /* Get the iSCSI APP TLV */ struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; for (i = 0; i < dcbcfg->numapps; i++) { app = dcbcfg->app[i]; if (app.selector == I40E_APP_SEL_TCPIP && app.protocolid == I40E_APP_PROTOID_ISCSI) { tc = dcbcfg->etscfg.prioritytable[app.priority]; enabled_tc |= BIT(tc); break; } } return enabled_tc; } /** * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config * @dcbcfg: the corresponding DCBx configuration structure * * Return the number of TCs from given DCBx configuration **/ static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) { int i, tc_unused = 0; u8 num_tc = 0; u8 ret = 0; /* Scan the ETS Config Priority Table to find * traffic class enabled for a given priority * and create a bitmask of enabled TCs */ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]); /* Now scan the bitmask to check for * contiguous TCs starting with TC0 */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (num_tc & BIT(i)) { if (!tc_unused) { ret++; } else { pr_err("Non-contiguous TC - Disabling DCB\n"); return 1; } } else { tc_unused = 1; } } /* There is always at least TC0 */ if (!ret) ret = 1; return ret; } /** * i40e_dcb_get_enabled_tc - Get enabled traffic classes * @dcbcfg: the corresponding DCBx configuration structure * * Query the current DCB configuration and return the number of * traffic classes enabled from the given DCBX config **/ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) { u8 num_tc = i40e_dcb_get_num_tc(dcbcfg); u8 enabled_tc = 1; u8 i; for (i = 0; i < num_tc; i++) enabled_tc |= BIT(i); return enabled_tc; } /** * i40e_mqprio_get_enabled_tc - Get enabled traffic classes * @pf: PF being queried * * Query the current MQPRIO configuration and return the number of * traffic classes enabled. **/ static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; u8 num_tc = vsi->mqprio_qopt.qopt.num_tc; u8 enabled_tc = 1, i; for (i = 1; i < num_tc; i++) enabled_tc |= BIT(i); return enabled_tc; } /** * i40e_pf_get_num_tc - Get enabled traffic classes for PF * @pf: PF being queried * * Return number of traffic classes enabled for the given PF **/ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u8 i, enabled_tc = 1; u8 num_tc = 0; struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; if (i40e_is_tc_mqprio_enabled(pf)) return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; /* If neither MQPRIO nor DCB is enabled, then always use single TC */ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) return 1; /* SFP mode will be enabled for all TCs on port */ if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) return i40e_dcb_get_num_tc(dcbcfg); /* MFP mode return count of enabled TCs for this PF */ if (pf->hw.func_caps.iscsi) enabled_tc = i40e_get_iscsi_tc_map(pf); else return 1; /* Only TC0 */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) num_tc++; } return num_tc; } /** * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes * @pf: PF being queried * * Return a bitmap for enabled traffic classes for this PF. 
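 *
 * Worked example (TC counts are illustrative): with neither mqprio nor DCB
 * enabled this returns the default TC0-only map; in non-MFP DCB mode with
 * three contiguous TCs enabled, i40e_dcb_get_enabled_tc() yields 0x7
 * (TC0-TC2).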
**/ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) { if (i40e_is_tc_mqprio_enabled(pf)) return i40e_mqprio_get_enabled_tc(pf); /* If neither MQPRIO nor DCB is enabled for this PF then just return * default TC */ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) return I40E_DEFAULT_TRAFFIC_CLASS; /* SFP mode we want PF to be enabled for all TCs */ if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); /* MFP enabled and iSCSI PF type */ if (pf->hw.func_caps.iscsi) return i40e_get_iscsi_tc_map(pf); else return I40E_DEFAULT_TRAFFIC_CLASS; } /** * i40e_vsi_get_bw_info - Query VSI BW Information * @vsi: the VSI being queried * * Returns 0 on success, negative value on failure **/ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) { struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0}; struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u32 tc_bw_max; int ret; int i; /* Get the VSI level BW configuration */ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't get PF vsi bw config, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } /* Get the VSI level BW configuration per TC */ ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't get PF vsi ets bw config, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { dev_info(&pf->pdev->dev, "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n", bw_config.tc_valid_bits, bw_ets_config.tc_valid_bits); /* Still continuing */ } vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit); vsi->bw_max_quanta = bw_config.max_bw; tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) | (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i]; vsi->bw_ets_limit_credits[i] = le16_to_cpu(bw_ets_config.credits[i]); /* 3 bits out of 4 for each TC */ vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); } return 0; } /** * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC * @vsi: the VSI being configured * @enabled_tc: TC bitmap * @bw_share: BW shared credits per TC * * Returns 0 on success, negative value on failure **/ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, u8 *bw_share) { struct i40e_aqc_configure_vsi_tc_bw_data bw_data; struct i40e_pf *pf = vsi->back; int ret; int i; /* There is no need to reset BW when mqprio mode is on. 
*/ if (i40e_is_tc_mqprio_enabled(pf)) return 0; if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { ret = i40e_set_bw_limit(vsi, vsi->seid, 0); if (ret) dev_info(&pf->pdev->dev, "Failed to reset tx rate for vsi->seid %u\n", vsi->seid); return ret; } memset(&bw_data, 0, sizeof(bw_data)); bw_data.tc_valid_bits = enabled_tc; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) bw_data.tc_bw_credits[i] = bw_share[i]; ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, "AQ command Config VSI BW allocation per TC failed = %d\n", pf->hw.aq.asq_last_status); return -EINVAL; } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) vsi->info.qs_handle[i] = bw_data.qs_handles[i]; return 0; } /** * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration * @vsi: the VSI being configured * @enabled_tc: TC map to be enabled * **/ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) { struct net_device *netdev = vsi->netdev; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u8 netdev_tc = 0; int i; struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; if (!netdev) return; if (!enabled_tc) { netdev_reset_tc(netdev); return; } /* Set up actual enabled TCs on the VSI */ if (netdev_set_num_tc(netdev, vsi->tc_config.numtc)) return; /* set per TC queues for the VSI */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* Only set TC queues for enabled tcs * * e.g. For a VSI that has TC0 and TC3 enabled the * enabled_tc bitmap would be 0x00001001; the driver * will set the numtc for netdev as 2 that will be * referenced by the netdev layer as TC 0 and 1. */ if (vsi->tc_config.enabled_tc & BIT(i)) netdev_set_tc_queue(netdev, vsi->tc_config.tc_info[i].netdev_tc, vsi->tc_config.tc_info[i].qcount, vsi->tc_config.tc_info[i].qoffset); } if (i40e_is_tc_mqprio_enabled(pf)) return; /* Assign UP2TC map for the VSI */ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { /* Get the actual TC# for the UP */ u8 ets_tc = dcbcfg->etscfg.prioritytable[i]; /* Get the mapped netdev TC# for the UP */ netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc; netdev_set_prio_tc_map(netdev, i, netdev_tc); } } /** * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map * @vsi: the VSI being configured * @ctxt: the ctxt buffer returned from AQ VSI update param command **/ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) { /* copy just the sections touched not the entire info * since not all sections are valid as returned by * update vsi params */ vsi->info.mapping_flags = ctxt->info.mapping_flags; memcpy(&vsi->info.queue_mapping, &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping)); memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping, sizeof(vsi->info.tc_mapping)); } /** * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI * @vsi: the VSI being reconfigured * @vsi_offset: offset from main VF VSI */ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset) { struct i40e_vsi_context ctxt = {}; struct i40e_pf *pf; struct i40e_hw *hw; int ret; if (!vsi) return -EINVAL; pf = vsi->back; hw = &pf->hw; ctxt.seid = vsi->seid; ctxt.pf_num = hw->pf_id; ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset; ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_VF; ctxt.info = vsi->info; i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc, false); if (vsi->reconfig_rss) { vsi->rss_size = 
min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs); ret = i40e_vsi_config_rss(vsi); if (ret) { dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n"); return ret; } vsi->reconfig_rss = false; } ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; } /* update the local VSI info with updated queue map */ i40e_vsi_update_queue_map(vsi, &ctxt); vsi->info.valid_sections = 0; return ret; } /** * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map * @vsi: VSI to be configured * @enabled_tc: TC bitmap * * This configures a particular VSI for TCs that are mapped to the * given TC bitmap. It uses default bandwidth share for TCs across * VSIs to configure TC for a particular VSI. * * NOTE: * It is expected that the VSI queues have been quisced before calling * this function. **/ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) { u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_vsi_context ctxt; int ret = 0; int i; /* Check if enabled_tc is same as existing or new TCs */ if (vsi->tc_config.enabled_tc == enabled_tc && vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) return ret; /* Enable ETS TCs with equal BW Share for now across all VSIs */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) bw_share[i] = 1; } ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); if (ret) { struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; dev_info(&pf->pdev->dev, "Failed configuring TC map %d for VSI %d\n", enabled_tc, vsi->seid); ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); if (ret) { dev_info(&pf->pdev->dev, "Failed querying vsi bw info, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); goto out; } if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) { u8 valid_tc = bw_config.tc_valid_bits & enabled_tc; if (!valid_tc) valid_tc = bw_config.tc_valid_bits; /* Always enable TC0, no matter what */ valid_tc |= 1; dev_info(&pf->pdev->dev, "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n", enabled_tc, bw_config.tc_valid_bits, valid_tc); enabled_tc = valid_tc; } ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); if (ret) { dev_err(&pf->pdev->dev, "Unable to configure TC map %d for VSI %d\n", enabled_tc, vsi->seid); goto out; } } /* Update Queue Pairs Mapping for currently enabled UPs */ ctxt.seid = vsi->seid; ctxt.pf_num = vsi->back->hw.pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; ctxt.info = vsi->info; if (i40e_is_tc_mqprio_enabled(pf)) { ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc); if (ret) goto out; } else { i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); } /* On destroying the qdisc, reset vsi->rss_size, as number of enabled * queues changed. 
*/ if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) { vsi->rss_size = min_t(int, vsi->back->alloc_rss_size, vsi->num_queue_pairs); ret = i40e_vsi_config_rss(vsi); if (ret) { dev_info(&vsi->back->pdev->dev, "Failed to reconfig rss for num_queues\n"); return ret; } vsi->reconfig_rss = false; } if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; } /* Update the VSI after updating the VSI queue-mapping * information */ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "Update vsi tc config failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); goto out; } /* update the local VSI info with updated queue map */ i40e_vsi_update_queue_map(vsi, &ctxt); vsi->info.valid_sections = 0; /* Update current VSI BW information */ ret = i40e_vsi_get_bw_info(vsi); if (ret) { dev_info(&pf->pdev->dev, "Failed updating vsi bw info, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); goto out; } /* Update the netdev TC setup */ i40e_vsi_config_netdev_tc(vsi, enabled_tc); out: return ret; } /** * i40e_get_link_speed - Returns link speed for the interface * @vsi: VSI to be configured * **/ static int i40e_get_link_speed(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; switch (pf->hw.phy.link_info.link_speed) { case I40E_LINK_SPEED_40GB: return 40000; case I40E_LINK_SPEED_25GB: return 25000; case I40E_LINK_SPEED_20GB: return 20000; case I40E_LINK_SPEED_10GB: return 10000; case I40E_LINK_SPEED_1GB: return 1000; default: return -EINVAL; } } /** * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits * @vsi: Pointer to vsi structure * @max_tx_rate: max TX rate in bytes to be converted into Mbits * * Helper function to convert units before send to set BW limit **/ static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate) { if (max_tx_rate < I40E_BW_MBPS_DIVISOR) { dev_warn(&vsi->back->pdev->dev, "Setting max tx rate to minimum usable value of 50Mbps.\n"); max_tx_rate = I40E_BW_CREDIT_DIVISOR; } else { do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); } return max_tx_rate; } /** * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate * @vsi: VSI to be configured * @seid: seid of the channel/VSI * @max_tx_rate: max TX rate to be configured as BW limit * * Helper function to set BW limit for a given VSI **/ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) { struct i40e_pf *pf = vsi->back; u64 credits = 0; int speed = 0; int ret = 0; speed = i40e_get_link_speed(vsi); if (max_tx_rate > speed) { dev_err(&pf->pdev->dev, "Invalid max tx rate %llu specified for VSI seid %d.", max_tx_rate, seid); return -EINVAL; } if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) { dev_warn(&pf->pdev->dev, "Setting max tx rate to minimum usable value of 50Mbps.\n"); max_tx_rate = I40E_BW_CREDIT_DIVISOR; } /* Tx rate credits are in values of 50Mbps, 0 is disabled */ credits = max_tx_rate; do_div(credits, I40E_BW_CREDIT_DIVISOR); ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits, I40E_MAX_BW_INACTIVE_ACCUM, NULL); if (ret) dev_err(&pf->pdev->dev, "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n", max_tx_rate, seid, ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; } /** * i40e_remove_queue_channels - Remove queue channels for the TCs * @vsi: VSI to be configured * * Remove queue channels for the TCs 
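 *
 * In outline (a descriptive note, not new behaviour): for every channel on
 * vsi->ch_list this clears ring->ch on the channel's queue pairs, resets the
 * channel's Tx rate limit, deletes cloud filters bound to the channel's SEID
 * and finally removes the channel VSI from firmware.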
**/ static void i40e_remove_queue_channels(struct i40e_vsi *vsi) { enum i40e_admin_queue_err last_aq_status; struct i40e_cloud_filter *cfilter; struct i40e_channel *ch, *ch_tmp; struct i40e_pf *pf = vsi->back; struct hlist_node *node; int ret, i; /* Reset rss size that was stored when reconfiguring rss for * channel VSIs with non-power-of-2 queue count. */ vsi->current_rss_size = 0; /* perform cleanup for channels if they exist */ if (list_empty(&vsi->ch_list)) return; list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { struct i40e_vsi *p_vsi; list_del(&ch->list); p_vsi = ch->parent_vsi; if (!p_vsi || !ch->initialized) { kfree(ch); continue; } /* Reset queue contexts */ for (i = 0; i < ch->num_queue_pairs; i++) { struct i40e_ring *tx_ring, *rx_ring; u16 pf_q; pf_q = ch->base_queue + i; tx_ring = vsi->tx_rings[pf_q]; tx_ring->ch = NULL; rx_ring = vsi->rx_rings[pf_q]; rx_ring->ch = NULL; } /* Reset BW configured for this VSI via mqprio */ ret = i40e_set_bw_limit(vsi, ch->seid, 0); if (ret) dev_info(&vsi->back->pdev->dev, "Failed to reset tx rate for ch->seid %u\n", ch->seid); /* delete cloud filters associated with this channel */ hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, cloud_node) { if (cfilter->seid != ch->seid) continue; hash_del(&cfilter->cloud_node); if (cfilter->dst_port) ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, false); else ret = i40e_add_del_cloud_filter(vsi, cfilter, false); last_aq_status = pf->hw.aq.asq_last_status; if (ret) dev_info(&pf->pdev->dev, "Failed to delete cloud filter, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, last_aq_status)); kfree(cfilter); } /* delete VSI from FW */ ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, NULL); if (ret) dev_err(&vsi->back->pdev->dev, "unable to remove channel (%d) for parent VSI(%d)\n", ch->seid, p_vsi->seid); kfree(ch); } INIT_LIST_HEAD(&vsi->ch_list); } /** * i40e_get_max_queues_for_channel * @vsi: ptr to VSI to which channels are associated with * * Helper function which returns max value among the queue counts set on the * channels/TCs created. **/ static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi) { struct i40e_channel *ch, *ch_tmp; int max = 0; list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { if (!ch->initialized) continue; if (ch->num_queue_pairs > max) max = ch->num_queue_pairs; } return max; } /** * i40e_validate_num_queues - validate num_queues w.r.t channel * @pf: ptr to PF device * @num_queues: number of queues * @vsi: the parent VSI * @reconfig_rss: indicates should the RSS be reconfigured or not * * This function validates number of queues in the context of new channel * which is being established and determines if RSS should be reconfigured * or not for parent VSI. **/ static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues, struct i40e_vsi *vsi, bool *reconfig_rss) { int max_ch_queues; if (!reconfig_rss) return -EINVAL; *reconfig_rss = false; if (vsi->current_rss_size) { if (num_queues > vsi->current_rss_size) { dev_dbg(&pf->pdev->dev, "Error: num_queues (%d) > vsi's current_size(%d)\n", num_queues, vsi->current_rss_size); return -EINVAL; } else if ((num_queues < vsi->current_rss_size) && (!is_power_of_2(num_queues))) { dev_dbg(&pf->pdev->dev, "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n", num_queues, vsi->current_rss_size); return -EINVAL; } } if (!is_power_of_2(num_queues)) { /* Find the max num_queues configured for channel if channel * exist. 
* if channel exist, then enforce 'num_queues' to be more than * max ever queues configured for channel. */ max_ch_queues = i40e_get_max_queues_for_channel(vsi); if (num_queues < max_ch_queues) { dev_dbg(&pf->pdev->dev, "Error: num_queues (%d) < max queues configured for channel(%d)\n", num_queues, max_ch_queues); return -EINVAL; } *reconfig_rss = true; } return 0; } /** * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size * @vsi: the VSI being setup * @rss_size: size of RSS, accordingly LUT gets reprogrammed * * This function reconfigures RSS by reprogramming LUTs using 'rss_size' **/ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size) { struct i40e_pf *pf = vsi->back; u8 seed[I40E_HKEY_ARRAY_SIZE]; struct i40e_hw *hw = &pf->hw; int local_rss_size; u8 *lut; int ret; if (!vsi->rss_size) return -EINVAL; if (rss_size > vsi->rss_size) return -EINVAL; local_rss_size = min_t(int, vsi->rss_size, rss_size); lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM; /* Ignoring user configured lut if there is one */ i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size); /* Use user configured hash key if there is one, otherwise * use default. */ if (vsi->rss_hkey_user) memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); else netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); if (ret) { dev_info(&pf->pdev->dev, "Cannot set RSS lut, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); kfree(lut); return ret; } kfree(lut); /* Do the update w.r.t. storing rss_size */ if (!vsi->orig_rss_size) vsi->orig_rss_size = vsi->rss_size; vsi->current_rss_size = local_rss_size; return ret; } /** * i40e_channel_setup_queue_map - Setup a channel queue map * @pf: ptr to PF device * @ctxt: VSI context structure * @ch: ptr to channel structure * * Setup queue map for a specific channel **/ static void i40e_channel_setup_queue_map(struct i40e_pf *pf, struct i40e_vsi_context *ctxt, struct i40e_channel *ch) { u16 qcount, qmap, sections = 0; u8 offset = 0; int pow; sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; sections |= I40E_AQ_VSI_PROP_SCHED_VALID; qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix); ch->num_queue_pairs = qcount; /* find the next higher power-of-2 of num queue pairs */ pow = ilog2(qcount); if (!is_power_of_2(qcount)) pow++; qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); /* Setup queue TC[0].qmap for given VSI context */ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */ ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue); ctxt->info.valid_sections |= cpu_to_le16(sections); } /** * i40e_add_channel - add a channel by adding VSI * @pf: ptr to PF device * @uplink_seid: underlying HW switching element (VEB) ID * @ch: ptr to channel structure * * Add a channel (VSI) using add_vsi and queue_map **/ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid, struct i40e_channel *ch) { struct i40e_hw *hw = &pf->hw; struct i40e_vsi_context ctxt; u8 enabled_tc = 0x1; /* TC0 enabled */ int ret; if (ch->type != I40E_VSI_VMDQ2) { dev_info(&pf->pdev->dev, "add new vsi failed, ch->type %d\n", ch->type); return -EINVAL; } memset(&ctxt, 0, sizeof(ctxt)); ctxt.pf_num = hw->pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; if (ch->type == 
I40E_VSI_VMDQ2) ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } /* Set queue map for a given VSI context */ i40e_channel_setup_queue_map(pf, &ctxt, ch); /* Now time to create VSI */ ret = i40e_aq_add_vsi(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "add new vsi failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -ENOENT; } /* Success, update channel, set enabled_tc only if the channel * is not a macvlan */ ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc; ch->seid = ctxt.seid; ch->vsi_number = ctxt.vsi_number; ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx); /* copy just the sections touched not the entire info * since not all sections are valid as returned by * update vsi params */ ch->info.mapping_flags = ctxt.info.mapping_flags; memcpy(&ch->info.queue_mapping, &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping)); memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping, sizeof(ctxt.info.tc_mapping)); return 0; } static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch, u8 *bw_share) { struct i40e_aqc_configure_vsi_tc_bw_data bw_data; int ret; int i; memset(&bw_data, 0, sizeof(bw_data)); bw_data.tc_valid_bits = ch->enabled_tc; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) bw_data.tc_bw_credits[i] = bw_share[i]; ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid, &bw_data, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n", vsi->back->hw.aq.asq_last_status, ch->seid); return -EINVAL; } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) ch->info.qs_handle[i] = bw_data.qs_handles[i]; return 0; } /** * i40e_channel_config_tx_ring - config TX ring associated with new channel * @pf: ptr to PF device * @vsi: the VSI being setup * @ch: ptr to channel structure * * Configure TX rings associated with channel (VSI) since queues are being * from parent VSI. 
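 *
 * Illustrative example (queue numbers are hypothetical): a channel with
 * base_queue 16 and num_queue_pairs 4 first gets an equal BW share for each
 * of its enabled TCs, then the parent VSI's Tx/Rx rings 16..19 are tagged
 * with ring->ch, which the driver uses when reconfiguring those Tx queues
 * for the new channel VSI.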
**/ static int i40e_channel_config_tx_ring(struct i40e_pf *pf, struct i40e_vsi *vsi, struct i40e_channel *ch) { u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; int ret; int i; /* Enable ETS TCs with equal BW Share for now across all VSIs */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (ch->enabled_tc & BIT(i)) bw_share[i] = 1; } /* configure BW for new VSI */ ret = i40e_channel_config_bw(vsi, ch, bw_share); if (ret) { dev_info(&vsi->back->pdev->dev, "Failed configuring TC map %d for channel (seid %u)\n", ch->enabled_tc, ch->seid); return ret; } for (i = 0; i < ch->num_queue_pairs; i++) { struct i40e_ring *tx_ring, *rx_ring; u16 pf_q; pf_q = ch->base_queue + i; /* Get to TX ring ptr of main VSI, for re-setup TX queue * context */ tx_ring = vsi->tx_rings[pf_q]; tx_ring->ch = ch; /* Get the RX ring ptr */ rx_ring = vsi->rx_rings[pf_q]; rx_ring->ch = ch; } return 0; } /** * i40e_setup_hw_channel - setup new channel * @pf: ptr to PF device * @vsi: the VSI being setup * @ch: ptr to channel structure * @uplink_seid: underlying HW switching element (VEB) ID * @type: type of channel to be created (VMDq2/VF) * * Setup new channel (VSI) based on specified type (VMDq2/VF) * and configures TX rings accordingly **/ static inline int i40e_setup_hw_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, struct i40e_channel *ch, u16 uplink_seid, u8 type) { int ret; ch->initialized = false; ch->base_queue = vsi->next_base_queue; ch->type = type; /* Proceed with creation of channel (VMDq2) VSI */ ret = i40e_add_channel(pf, uplink_seid, ch); if (ret) { dev_info(&pf->pdev->dev, "failed to add_channel using uplink_seid %u\n", uplink_seid); return ret; } /* Mark the successful creation of channel */ ch->initialized = true; /* Reconfigure TX queues using QTX_CTL register */ ret = i40e_channel_config_tx_ring(pf, vsi, ch); if (ret) { dev_info(&pf->pdev->dev, "failed to configure TX rings for channel %u\n", ch->seid); return ret; } /* update 'next_base_queue' */ vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs; dev_dbg(&pf->pdev->dev, "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n", ch->seid, ch->vsi_number, ch->stat_counter_idx, ch->num_queue_pairs, vsi->next_base_queue); return ret; } /** * i40e_setup_channel - setup new channel using uplink element * @pf: ptr to PF device * @vsi: pointer to the VSI to set up the channel within * @ch: ptr to channel structure * * Setup new channel (VSI) based on specified type (VMDq2/VF) * and uplink switching element (uplink_seid) **/ static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, struct i40e_channel *ch) { u8 vsi_type; u16 seid; int ret; if (vsi->type == I40E_VSI_MAIN) { vsi_type = I40E_VSI_VMDQ2; } else { dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n", vsi->type); return false; } /* underlying switching element */ seid = pf->vsi[pf->lan_vsi]->uplink_seid; /* create channel (VSI), configure TX rings */ ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type); if (ret) { dev_err(&pf->pdev->dev, "failed to setup hw_channel\n"); return false; } return ch->initialized ? true : false; } /** * i40e_validate_and_set_switch_mode - sets up switch mode correctly * @vsi: ptr to VSI which has PF backing * * Sets up switch mode correctly if it needs to be changed and perform * what are allowed modes. 
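 *
 * Behavioural note (summarising the checks below): if device capabilities
 * already report cloud filter MODE2, the function returns 0 without touching
 * the switch configuration; any other reported mode at or above MODE1 is
 * rejected with -EINVAL. Otherwise it programs non-tunneled cloud filter
 * mode with TCP as the L4 type via i40e_aq_set_switch_config().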
**/ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi) { u8 mode; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int ret; ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities); if (ret) return -EINVAL; if (hw->dev_caps.switch_mode) { /* if switch mode is set, support mode2 (non-tunneled for * cloud filter) for now */ u32 switch_mode = hw->dev_caps.switch_mode & I40E_SWITCH_MODE_MASK; if (switch_mode >= I40E_CLOUD_FILTER_MODE1) { if (switch_mode == I40E_CLOUD_FILTER_MODE2) return 0; dev_err(&pf->pdev->dev, "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n", hw->dev_caps.switch_mode); return -EINVAL; } } /* Set Bit 7 to be valid */ mode = I40E_AQ_SET_SWITCH_BIT7_VALID; /* Set L4type for TCP support */ mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP; /* Set cloud filter mode */ mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL; /* Prep mode field for set_switch_config */ ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags, pf->last_sw_conf_valid_flags, mode, NULL); if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH) dev_err(&pf->pdev->dev, "couldn't set switch config bits, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; } /** * i40e_create_queue_channel - function to create channel * @vsi: VSI to be configured * @ch: ptr to channel (it contains channel specific params) * * This function creates channel (VSI) using num_queues specified by user, * reconfigs RSS if needed. **/ int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch) { struct i40e_pf *pf = vsi->back; bool reconfig_rss; int err; if (!ch) return -EINVAL; if (!ch->num_queue_pairs) { dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n", ch->num_queue_pairs); return -EINVAL; } /* validate user requested num_queues for channel */ err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi, &reconfig_rss); if (err) { dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n", ch->num_queue_pairs); return -EINVAL; } /* By default we are in VEPA mode, if this is the first VF/VMDq * VSI to be added switch to VEB mode. 
*/ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; if (vsi->type == I40E_VSI_MAIN) { if (i40e_is_tc_mqprio_enabled(pf)) i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); else i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); } /* now onwards for main VSI, number of queues will be value * of TC0's queue count */ } /* By this time, vsi->cnt_q_avail shall be set to non-zero and * it should be more than num_queues */ if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) { dev_dbg(&pf->pdev->dev, "Error: cnt_q_avail (%u) less than num_queues %d\n", vsi->cnt_q_avail, ch->num_queue_pairs); return -EINVAL; } /* reconfig_rss only if vsi type is MAIN_VSI */ if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) { err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs); if (err) { dev_info(&pf->pdev->dev, "Error: unable to reconfig rss for num_queues (%u)\n", ch->num_queue_pairs); return -EINVAL; } } if (!i40e_setup_channel(pf, vsi, ch)) { dev_info(&pf->pdev->dev, "Failed to setup channel\n"); return -EINVAL; } dev_info(&pf->pdev->dev, "Setup channel (id:%u) utilizing num_queues %d\n", ch->seid, ch->num_queue_pairs); /* configure VSI for BW limit */ if (ch->max_tx_rate) { u64 credits = ch->max_tx_rate; if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate)) return -EINVAL; do_div(credits, I40E_BW_CREDIT_DIVISOR); dev_dbg(&pf->pdev->dev, "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", ch->max_tx_rate, credits, ch->seid); } /* in case of VF, this will be main SRIOV VSI */ ch->parent_vsi = vsi; /* and update main_vsi's count for queue_available to use */ vsi->cnt_q_avail -= ch->num_queue_pairs; return 0; } /** * i40e_configure_queue_channels - Add queue channel for the given TCs * @vsi: VSI to be configured * * Configures queue channel mapping to the given TCs **/ static int i40e_configure_queue_channels(struct i40e_vsi *vsi) { struct i40e_channel *ch; u64 max_rate = 0; int ret = 0, i; /* Create app vsi with the TCs. 
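	 * Each enabled TC above TC0 gets its own channel VSI below, sized by
	 * the TC's queue count.  The tc max_rate is given in bytes/s and is
	 * divided by I40E_BW_MBPS_DIVISOR to get Mbit/s; assuming the divisor
	 * is 125000 (bytes/s per Mbit/s), a max_rate of 125000000 bytes/s
	 * becomes 1000 Mbit/s.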
Main VSI with TC0 is already set up */ vsi->tc_seid_map[0] = vsi->seid; for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (vsi->tc_config.enabled_tc & BIT(i)) { ch = kzalloc(sizeof(*ch), GFP_KERNEL); if (!ch) { ret = -ENOMEM; goto err_free; } INIT_LIST_HEAD(&ch->list); ch->num_queue_pairs = vsi->tc_config.tc_info[i].qcount; ch->base_queue = vsi->tc_config.tc_info[i].qoffset; /* Bandwidth limit through tc interface is in bytes/s, * change to Mbit/s */ max_rate = vsi->mqprio_qopt.max_rate[i]; do_div(max_rate, I40E_BW_MBPS_DIVISOR); ch->max_tx_rate = max_rate; list_add_tail(&ch->list, &vsi->ch_list); ret = i40e_create_queue_channel(vsi, ch); if (ret) { dev_err(&vsi->back->pdev->dev, "Failed creating queue channel with TC%d: queues %d\n", i, ch->num_queue_pairs); goto err_free; } vsi->tc_seid_map[i] = ch->seid; } } /* reset to reconfigure TX queue contexts */ i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true); return ret; err_free: i40e_remove_queue_channels(vsi); return ret; } /** * i40e_veb_config_tc - Configure TCs for given VEB * @veb: given VEB * @enabled_tc: TC bitmap * * Configures given TC bitmap for VEB (switching) element **/ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) { struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0}; struct i40e_pf *pf = veb->pf; int ret = 0; int i; /* No TCs or already enabled TCs just return */ if (!enabled_tc || veb->enabled_tc == enabled_tc) return ret; bw_data.tc_valid_bits = enabled_tc; /* bw_data.absolute_credits is not set (relative) */ /* Enable ETS TCs with equal BW Share for now */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) bw_data.tc_bw_share_credits[i] = 1; } ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, "VEB bw config failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto out; } /* Update the BW information */ ret = i40e_veb_get_bw_info(veb); if (ret) { dev_info(&pf->pdev->dev, "Failed getting veb bw config, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } out: return ret; } #ifdef CONFIG_I40E_DCB /** * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs * @pf: PF struct * * Reconfigure VEB/VSIs on a given PF; it is assumed that * the caller would've quiesce all the VSIs before calling * this function **/ static void i40e_dcb_reconfigure(struct i40e_pf *pf) { u8 tc_map = 0; int ret; u8 v; /* Enable the TCs available on PF to all VEBs */ tc_map = i40e_pf_get_tc_map(pf); if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS) return; for (v = 0; v < I40E_MAX_VEB; v++) { if (!pf->veb[v]) continue; ret = i40e_veb_config_tc(pf->veb[v], tc_map); if (ret) { dev_info(&pf->pdev->dev, "Failed configuring TC for VEB seid=%d\n", pf->veb[v]->seid); /* Will try to configure as many components */ } } /* Update each VSI */ for (v = 0; v < pf->num_alloc_vsi; v++) { if (!pf->vsi[v]) continue; /* - Enable all TCs for the LAN VSI * - For all others keep them at TC0 for now */ if (v == pf->lan_vsi) tc_map = i40e_pf_get_tc_map(pf); else tc_map = I40E_DEFAULT_TRAFFIC_CLASS; ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); if (ret) { dev_info(&pf->pdev->dev, "Failed configuring TC for VSI seid=%d\n", pf->vsi[v]->seid); /* Will try to configure as many components */ } else { /* Re-configure VSI vectors based on updated TC map */ i40e_vsi_map_rings_to_vectors(pf->vsi[v]); if (pf->vsi[v]->netdev) i40e_dcbnl_set_all(pf->vsi[v]); } } } /** * i40e_resume_port_tx - Resume port Tx * 
@pf: PF struct * * Resume a port's Tx and issue a PF reset in case of failure to * resume. **/ static int i40e_resume_port_tx(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; int ret; ret = i40e_aq_resume_port_tx(hw, NULL); if (ret) { dev_info(&pf->pdev->dev, "Resume Port Tx failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); } return ret; } /** * i40e_suspend_port_tx - Suspend port Tx * @pf: PF struct * * Suspend a port's Tx and issue a PF reset in case of failure. **/ static int i40e_suspend_port_tx(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; int ret; ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL); if (ret) { dev_info(&pf->pdev->dev, "Suspend Port Tx failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); } return ret; } /** * i40e_hw_set_dcb_config - Program new DCBX settings into HW * @pf: PF being configured * @new_cfg: New DCBX configuration * * Program DCB settings into HW and reconfigure VEB/VSIs on * given PF. Uses "Set LLDP MIB" AQC to program the hardware. **/ static int i40e_hw_set_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) { struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config; int ret; /* Check if need reconfiguration */ if (!memcmp(&new_cfg, &old_cfg, sizeof(new_cfg))) { dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n"); return 0; } /* Config change disable all VSIs */ i40e_pf_quiesce_all_vsi(pf); /* Copy the new config to the current config */ *old_cfg = *new_cfg; old_cfg->etsrec = old_cfg->etscfg; ret = i40e_set_dcb_config(&pf->hw); if (ret) { dev_info(&pf->pdev->dev, "Set DCB Config failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto out; } /* Changes in configuration update VEB/VSI */ i40e_dcb_reconfigure(pf); out: /* In case of reset do not try to resume anything */ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) { /* Re-start the VSIs if disabled */ ret = i40e_resume_port_tx(pf); /* In case of error no point in resuming VSIs */ if (ret) goto err; i40e_pf_unquiesce_all_vsi(pf); } err: return ret; } /** * i40e_hw_dcb_config - Program new DCBX settings into HW * @pf: PF being configured * @new_cfg: New DCBX configuration * * Program DCB settings into HW and reconfigure VEB/VSIs on * given PF **/ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) { struct i40e_aqc_configure_switching_comp_ets_data ets_data; u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0}; u32 mfs_tc[I40E_MAX_TRAFFIC_CLASS]; struct i40e_dcbx_config *old_cfg; u8 mode[I40E_MAX_TRAFFIC_CLASS]; struct i40e_rx_pb_config pb_cfg; struct i40e_hw *hw = &pf->hw; u8 num_ports = hw->num_ports; bool need_reconfig; int ret = -EINVAL; u8 lltc_map = 0; u8 tc_map = 0; u8 new_numtc; u8 i; dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n"); /* Un-pack information to Program ETS HW via shared API * numtc, tcmap * LLTC map * ETS/NON-ETS arbiter mode * max exponent (credit refills) * Total number of ports * PFC priority bit-map * Priority Table * BW % per TC * Arbiter mode between UPs sharing same TC * TSA table (ETS or non-ETS) * EEE enabled or not * MFS TC table */ new_numtc = i40e_dcb_get_num_tc(new_cfg); memset(&ets_data, 0, sizeof(ets_data)); for (i = 0; i < new_numtc; 
i++) { tc_map |= BIT(i); switch (new_cfg->etscfg.tsatable[i]) { case I40E_IEEE_TSA_ETS: prio_type[i] = I40E_DCB_PRIO_TYPE_ETS; ets_data.tc_bw_share_credits[i] = new_cfg->etscfg.tcbwtable[i]; break; case I40E_IEEE_TSA_STRICT: prio_type[i] = I40E_DCB_PRIO_TYPE_STRICT; lltc_map |= BIT(i); ets_data.tc_bw_share_credits[i] = I40E_DCB_STRICT_PRIO_CREDITS; break; default: /* Invalid TSA type */ need_reconfig = false; goto out; } } old_cfg = &hw->local_dcbx_config; /* Check if need reconfiguration */ need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg); /* If needed, enable/disable frame tagging, disable all VSIs * and suspend port tx */ if (need_reconfig) { /* Enable DCB tagging only when more than one TC */ if (new_numtc > 1) pf->flags |= I40E_FLAG_DCB_ENABLED; else pf->flags &= ~I40E_FLAG_DCB_ENABLED; set_bit(__I40E_PORT_SUSPENDED, pf->state); /* Reconfiguration needed quiesce all VSIs */ i40e_pf_quiesce_all_vsi(pf); ret = i40e_suspend_port_tx(pf); if (ret) goto err; } /* Configure Port ETS Tx Scheduler */ ets_data.tc_valid_bits = tc_map; ets_data.tc_strict_priority_flags = lltc_map; ret = i40e_aq_config_switch_comp_ets (hw, pf->mac_seid, &ets_data, i40e_aqc_opc_modify_switching_comp_ets, NULL); if (ret) { dev_info(&pf->pdev->dev, "Modify Port ETS failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto out; } /* Configure Rx ETS HW */ memset(&mode, I40E_DCB_ARB_MODE_ROUND_ROBIN, sizeof(mode)); i40e_dcb_hw_set_num_tc(hw, new_numtc); i40e_dcb_hw_rx_fifo_config(hw, I40E_DCB_ARB_MODE_ROUND_ROBIN, I40E_DCB_ARB_MODE_STRICT_PRIORITY, I40E_DCB_DEFAULT_MAX_EXPONENT, lltc_map); i40e_dcb_hw_rx_cmd_monitor_config(hw, new_numtc, num_ports); i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode, prio_type); i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable, new_cfg->etscfg.prioritytable); i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable); /* Configure Rx Packet Buffers in HW */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu; mfs_tc[i] += I40E_PACKET_HDR_PAD; } i40e_dcb_hw_calculate_pool_sizes(hw, num_ports, false, new_cfg->pfc.pfcenable, mfs_tc, &pb_cfg); i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg); /* Update the local Rx Packet buffer config */ pf->pb_cfg = pb_cfg; /* Inform the FW about changes to DCB configuration */ ret = i40e_aq_dcb_updated(&pf->hw, NULL); if (ret) { dev_info(&pf->pdev->dev, "DCB Updated failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto out; } /* Update the port DCBx configuration */ *old_cfg = *new_cfg; /* Changes in configuration update VEB/VSI */ i40e_dcb_reconfigure(pf); out: /* Re-start the VSIs if disabled */ if (need_reconfig) { ret = i40e_resume_port_tx(pf); clear_bit(__I40E_PORT_SUSPENDED, pf->state); /* In case of error no point in resuming VSIs */ if (ret) goto err; /* Wait for the PF's queues to be disabled */ ret = i40e_pf_wait_queues_disabled(pf); if (ret) { /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); goto err; } else { i40e_pf_unquiesce_all_vsi(pf); set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); } /* registers are set, lets apply */ if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) ret = i40e_hw_set_dcb_config(pf, new_cfg); } err: return ret; } /** * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW * @pf: PF being queried * * Set default DCB configuration 
in case DCB is to be done in SW. **/ int i40e_dcb_sw_default_config(struct i40e_pf *pf) { struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config; struct i40e_aqc_configure_switching_comp_ets_data ets_data; struct i40e_hw *hw = &pf->hw; int err; if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) { /* Update the local cached instance with TC0 ETS */ memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config)); pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING; pf->tmp_cfg.etscfg.maxtcs = 0; pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW; pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS; pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING; pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; /* FW needs one App to configure HW */ pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS; pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE; pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO; pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE; return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg); } memset(&ets_data, 0, sizeof(ets_data)); ets_data.tc_valid_bits = I40E_DEFAULT_TRAFFIC_CLASS; /* TC0 only */ ets_data.tc_strict_priority_flags = 0; /* ETS */ ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW; /* 100% to TC0 */ /* Enable ETS on the Physical port */ err = i40e_aq_config_switch_comp_ets (hw, pf->mac_seid, &ets_data, i40e_aqc_opc_enable_switching_comp_ets, NULL); if (err) { dev_info(&pf->pdev->dev, "Enable Port ETS failed, err %pe aq_err %s\n", ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); err = -ENOENT; goto out; } /* Update the local cached instance with TC0 ETS */ dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING; dcb_cfg->etscfg.cbs = 0; dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS; dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW; out: return err; } /** * i40e_init_pf_dcb - Initialize DCB configuration * @pf: PF being configured * * Query the current DCB configuration and cache it * in the hardware structure **/ static int i40e_init_pf_dcb(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; int err; /* Do not enable DCB for SW1 and SW2 images even if the FW is capable * Also do not enable DCBx if FW LLDP agent is disabled */ if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) { dev_info(&pf->pdev->dev, "DCB is not supported.\n"); err = -EOPNOTSUPP; goto out; } if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) { dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n"); err = i40e_dcb_sw_default_config(pf); if (err) { dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n"); goto out; } dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n"); pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; /* at init capable but disabled */ pf->flags |= I40E_FLAG_DCB_CAPABLE; pf->flags &= ~I40E_FLAG_DCB_ENABLED; goto out; } err = i40e_init_dcb(hw, true); if (!err) { /* Device/Function is not DCBX capable */ if ((!hw->func_caps.dcb) || (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) { dev_info(&pf->pdev->dev, "DCBX offload is not supported or is disabled for this PF.\n"); } else { /* When status is not DISABLED then DCBX in FW */ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; pf->flags |= I40E_FLAG_DCB_CAPABLE; /* Enable DCB tagging only when more than one TC * or explicitly disable if only one TC */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) pf->flags |= I40E_FLAG_DCB_ENABLED; else pf->flags &= ~I40E_FLAG_DCB_ENABLED; dev_dbg(&pf->pdev->dev, "DCBX offload is supported for this PF.\n"); } } else 
if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) { dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n"); pf->flags |= I40E_FLAG_DISABLE_FW_LLDP; } else { dev_info(&pf->pdev->dev, "Query for DCB configuration failed, err %pe aq_err %s\n", ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } out: return err; } #endif /* CONFIG_I40E_DCB */ /** * i40e_print_link_message - print link up or down * @vsi: the VSI for which link needs a message * @isup: true of link is up, false otherwise */ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) { enum i40e_aq_link_speed new_speed; struct i40e_pf *pf = vsi->back; char *speed = "Unknown"; char *fc = "Unknown"; char *fec = ""; char *req_fec = ""; char *an = ""; if (isup) new_speed = pf->hw.phy.link_info.link_speed; else new_speed = I40E_LINK_SPEED_UNKNOWN; if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed)) return; vsi->current_isup = isup; vsi->current_speed = new_speed; if (!isup) { netdev_info(vsi->netdev, "NIC Link is Down\n"); return; } /* Warn user if link speed on NPAR enabled partition is not at * least 10GB */ if (pf->hw.func_caps.npar_enable && (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) netdev_warn(vsi->netdev, "The partition detected link speed that is less than 10Gbps\n"); switch (pf->hw.phy.link_info.link_speed) { case I40E_LINK_SPEED_40GB: speed = "40 G"; break; case I40E_LINK_SPEED_20GB: speed = "20 G"; break; case I40E_LINK_SPEED_25GB: speed = "25 G"; break; case I40E_LINK_SPEED_10GB: speed = "10 G"; break; case I40E_LINK_SPEED_5GB: speed = "5 G"; break; case I40E_LINK_SPEED_2_5GB: speed = "2.5 G"; break; case I40E_LINK_SPEED_1GB: speed = "1000 M"; break; case I40E_LINK_SPEED_100MB: speed = "100 M"; break; default: break; } switch (pf->hw.fc.current_mode) { case I40E_FC_FULL: fc = "RX/TX"; break; case I40E_FC_TX_PAUSE: fc = "TX"; break; case I40E_FC_RX_PAUSE: fc = "RX"; break; default: fc = "None"; break; } if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { req_fec = "None"; fec = "None"; an = "False"; if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) an = "True"; if (pf->hw.phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) fec = "CL74 FC-FEC/BASE-R"; else if (pf->hw.phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) fec = "CL108 RS-FEC"; /* 'CL108 RS-FEC' should be displayed when RS is requested, or * both RS and FC are requested */ if (vsi->back->hw.phy.link_info.req_fec_info & (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) { if (vsi->back->hw.phy.link_info.req_fec_info & I40E_AQ_REQUEST_FEC_RS) req_fec = "CL108 RS-FEC"; else req_fec = "CL74 FC-FEC/BASE-R"; } netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", speed, req_fec, fec, an, fc); } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) { req_fec = "None"; fec = "None"; an = "False"; if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) an = "True"; if (pf->hw.phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) fec = "CL74 FC-FEC/BASE-R"; if (pf->hw.phy.link_info.req_fec_info & I40E_AQ_REQUEST_FEC_KR) req_fec = "CL74 FC-FEC/BASE-R"; netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", speed, req_fec, fec, an, fc); } else { netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n", speed, fc); } } /** * i40e_up_complete - Finish the last steps of 
bringing up a connection * @vsi: the VSI being configured **/ static int i40e_up_complete(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int err; if (pf->flags & I40E_FLAG_MSIX_ENABLED) i40e_vsi_configure_msix(vsi); else i40e_configure_msi_and_legacy(vsi); /* start rings */ err = i40e_vsi_start_rings(vsi); if (err) return err; clear_bit(__I40E_VSI_DOWN, vsi->state); i40e_napi_enable_all(vsi); i40e_vsi_enable_irq(vsi); if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && (vsi->netdev)) { i40e_print_link_message(vsi, true); netif_tx_start_all_queues(vsi->netdev); netif_carrier_on(vsi->netdev); } /* replay FDIR SB filters */ if (vsi->type == I40E_VSI_FDIR) { /* reset fd counters */ pf->fd_add_err = 0; pf->fd_atr_cnt = 0; i40e_fdir_filter_restore(vsi); } /* On the next run of the service_task, notify any clients of the new * opened netdev */ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); i40e_service_event_schedule(pf); return 0; } /** * i40e_vsi_reinit_locked - Reset the VSI * @vsi: the VSI being configured * * Rebuild the ring structs after some configuration * has changed, e.g. MTU size. **/ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) usleep_range(1000, 2000); i40e_down(vsi); i40e_up(vsi); clear_bit(__I40E_CONFIG_BUSY, pf->state); } /** * i40e_force_link_state - Force the link status * @pf: board private structure * @is_up: whether the link state should be forced up or down **/ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up) { struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config = {0}; bool non_zero_phy_type = is_up; struct i40e_hw *hw = &pf->hw; u64 mask; u8 speed; int err; /* Card might've been put in an unstable state by other drivers * and applications, which causes incorrect speed values being * set on startup. In order to clear speed registers, we call * get_phy_capabilities twice, once to get initial state of * available speeds, and once to get current PHY config. */ err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL); if (err) { dev_err(&pf->pdev->dev, "failed to get phy cap., ret = %pe last_status = %s\n", ERR_PTR(err), i40e_aq_str(hw, hw->aq.asq_last_status)); return err; } speed = abilities.link_speed; /* Get the current phy config */ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); if (err) { dev_err(&pf->pdev->dev, "failed to get phy cap., ret = %pe last_status = %s\n", ERR_PTR(err), i40e_aq_str(hw, hw->aq.asq_last_status)); return err; } /* If link needs to go up, but was not forced to go down, * and its speed values are OK, no need for a flap * if non_zero_phy_type was set, still need to force up */ if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) non_zero_phy_type = true; else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) return 0; /* To force link we need to set bits for all supported PHY types, * but there are now more than 32, so we need to split the bitmap * across two fields. */ mask = I40E_PHY_TYPES_BITMASK; config.phy_type = non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0; config.phy_type_ext = non_zero_phy_type ? 
(u8)((mask >> 32) & 0xff) : 0; /* Copy the old settings, except of phy_type */ config.abilities = abilities.abilities; if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) { if (is_up) config.abilities |= I40E_AQ_PHY_ENABLE_LINK; else config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK); } if (abilities.link_speed != 0) config.link_speed = abilities.link_speed; else config.link_speed = speed; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; config.fec_config = abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_PHY_FEC_CONFIG_MASK; err = i40e_aq_set_phy_config(hw, &config, NULL); if (err) { dev_err(&pf->pdev->dev, "set phy config ret = %pe last_status = %s\n", ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return err; } /* Update the link info */ err = i40e_update_link_info(hw); if (err) { /* Wait a little bit (on 40G cards it sometimes takes a really * long time for link to come back from the atomic reset) * and try once more */ msleep(1000); i40e_update_link_info(hw); } i40e_aq_set_link_restart_an(hw, is_up, NULL); return 0; } /** * i40e_up - Bring the connection back up after being down * @vsi: the VSI being configured **/ int i40e_up(struct i40e_vsi *vsi) { int err; if (vsi->type == I40E_VSI_MAIN && (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) i40e_force_link_state(vsi->back, true); err = i40e_vsi_configure(vsi); if (!err) err = i40e_up_complete(vsi); return err; } /** * i40e_down - Shutdown the connection processing * @vsi: the VSI being stopped **/ void i40e_down(struct i40e_vsi *vsi) { int i; /* It is assumed that the caller of this function * sets the vsi->state __I40E_VSI_DOWN bit. */ if (vsi->netdev) { netif_carrier_off(vsi->netdev); netif_tx_disable(vsi->netdev); } i40e_vsi_disable_irq(vsi); i40e_vsi_stop_rings(vsi); if (vsi->type == I40E_VSI_MAIN && (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) i40e_force_link_state(vsi->back, false); i40e_napi_disable_all(vsi); for (i = 0; i < vsi->num_queue_pairs; i++) { i40e_clean_tx_ring(vsi->tx_rings[i]); if (i40e_enabled_xdp_vsi(vsi)) { /* Make sure that in-progress ndo_xdp_xmit and * ndo_xsk_wakeup calls are completed. 
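			 * Those handlers run inside RCU read-side critical
			 * sections, so the synchronize_rcu() below waits for
			 * any in-flight caller before the XDP Tx ring is
			 * cleaned.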
 */
			synchronize_rcu();
			i40e_clean_tx_ring(vsi->xdp_rings[i]);
		}
		i40e_clean_rx_ring(vsi->rx_rings[i]);
	}
}

/**
 * i40e_validate_mqprio_qopt - validate queue mapping info
 * @vsi: the VSI being configured
 * @mqprio_qopt: queue parameters
 **/
static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
				     struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 sum_max_rate = 0;
	u64 max_rate = 0;
	int i;

	if (mqprio_qopt->qopt.offset[0] != 0 ||
	    mqprio_qopt->qopt.num_tc < 1 ||
	    mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
		return -EINVAL;
	for (i = 0; ; i++) {
		if (!mqprio_qopt->qopt.count[i])
			return -EINVAL;
		if (mqprio_qopt->min_rate[i]) {
			dev_err(&vsi->back->pdev->dev,
				"Invalid min tx rate (greater than 0) specified\n");
			return -EINVAL;
		}
		max_rate = mqprio_qopt->max_rate[i];
		do_div(max_rate, I40E_BW_MBPS_DIVISOR);
		sum_max_rate += max_rate;

		if (i >= mqprio_qopt->qopt.num_tc - 1)
			break;
		if (mqprio_qopt->qopt.offset[i + 1] !=
		    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
			return -EINVAL;
	}
	if (vsi->num_queue_pairs <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
		dev_err(&vsi->back->pdev->dev,
			"Failed to create traffic channel, insufficient number of queues.\n");
		return -EINVAL;
	}
	if (sum_max_rate > i40e_get_link_speed(vsi)) {
		dev_err(&vsi->back->pdev->dev,
			"Invalid max tx rate specified\n");
		return -EINVAL;
	}
	return 0;
}

/**
 * i40e_vsi_set_default_tc_config - set default values for tc configuration
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
{
	u16 qcount;
	int i;

	/* Only TC0 is enabled */
	vsi->tc_config.numtc = 1;
	vsi->tc_config.enabled_tc = 1;
	qcount = min_t(int, vsi->alloc_queue_pairs,
		       i40e_pf_get_max_q_per_tc(vsi->back));
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* For the TC that is not enabled set the offset to default
		 * queue and allocate one queue for the given TC.
		 */
		vsi->tc_config.tc_info[i].qoffset = 0;
		if (i == 0)
			vsi->tc_config.tc_info[i].qcount = qcount;
		else
			vsi->tc_config.tc_info[i].qcount = 1;
		vsi->tc_config.tc_info[i].netdev_tc = 0;
	}
}

/**
 * i40e_del_macvlan_filter
 * @hw: pointer to the HW structure
 * @seid: seid of the channel VSI
 * @macaddr: the mac address to apply as a filter
 * @aq_err: store the admin Q error
 *
 * This function deletes a mac filter on the channel VSI which serves as the
 * macvlan. Returns 0 on success.
 **/
static int i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
				   const u8 *macaddr, int *aq_err)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	int status;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
	*aq_err = hw->aq.asq_last_status;

	return status;
}

/**
 * i40e_add_macvlan_filter
 * @hw: pointer to the HW structure
 * @seid: seid of the channel VSI
 * @macaddr: the mac address to apply as a filter
 * @aq_err: store the admin Q error
 *
 * This function adds a mac filter on the channel VSI which serves as the
 * macvlan. Returns 0 on success.
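 *
 * A hypothetical caller would pair it with i40e_del_macvlan_filter(); a
 * sketch only, assuming 'hw', a channel 'ch' and a macvlan netdev 'vdev'
 * as in i40e_fwd_ring_up():
 *
 *   int aq_err;
 *
 *   if (!i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err))
 *           i40e_del_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);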
**/ static int i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid, const u8 *macaddr, int *aq_err) { struct i40e_aqc_add_macvlan_element_data element; u16 cmd_flags = 0; int status; ether_addr_copy(element.mac_addr, macaddr); element.vlan_tag = 0; element.queue_number = 0; element.match_method = I40E_AQC_MM_ERR_NO_RES; cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; element.flags = cpu_to_le16(cmd_flags); status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL); *aq_err = hw->aq.asq_last_status; return status; } /** * i40e_reset_ch_rings - Reset the queue contexts in a channel * @vsi: the VSI we want to access * @ch: the channel we want to access */ static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch) { struct i40e_ring *tx_ring, *rx_ring; u16 pf_q; int i; for (i = 0; i < ch->num_queue_pairs; i++) { pf_q = ch->base_queue + i; tx_ring = vsi->tx_rings[pf_q]; tx_ring->ch = NULL; rx_ring = vsi->rx_rings[pf_q]; rx_ring->ch = NULL; } } /** * i40e_free_macvlan_channels * @vsi: the VSI we want to access * * This function frees the Qs of the channel VSI from * the stack and also deletes the channel VSIs which * serve as macvlans. */ static void i40e_free_macvlan_channels(struct i40e_vsi *vsi) { struct i40e_channel *ch, *ch_tmp; int ret; if (list_empty(&vsi->macvlan_list)) return; list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { struct i40e_vsi *parent_vsi; if (i40e_is_channel_macvlan(ch)) { i40e_reset_ch_rings(vsi, ch); clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev); netdev_set_sb_channel(ch->fwd->netdev, 0); kfree(ch->fwd); ch->fwd = NULL; } list_del(&ch->list); parent_vsi = ch->parent_vsi; if (!parent_vsi || !ch->initialized) { kfree(ch); continue; } /* remove the VSI */ ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, NULL); if (ret) dev_err(&vsi->back->pdev->dev, "unable to remove channel (%d) for parent VSI(%d)\n", ch->seid, parent_vsi->seid); kfree(ch); } vsi->macvlan_cnt = 0; } /** * i40e_fwd_ring_up - bring the macvlan device up * @vsi: the VSI we want to access * @vdev: macvlan netdevice * @fwd: the private fwd structure */ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev, struct i40e_fwd_adapter *fwd) { struct i40e_channel *ch = NULL, *ch_tmp, *iter; int ret = 0, num_tc = 1, i, aq_err; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; /* Go through the list and find an available channel */ list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) { if (!i40e_is_channel_macvlan(iter)) { iter->fwd = fwd; /* record configuration for macvlan interface in vdev */ for (i = 0; i < num_tc; i++) netdev_bind_sb_channel_queue(vsi->netdev, vdev, i, iter->num_queue_pairs, iter->base_queue); for (i = 0; i < iter->num_queue_pairs; i++) { struct i40e_ring *tx_ring, *rx_ring; u16 pf_q; pf_q = iter->base_queue + i; /* Get to TX ring ptr */ tx_ring = vsi->tx_rings[pf_q]; tx_ring->ch = iter; /* Get the RX ring ptr */ rx_ring = vsi->rx_rings[pf_q]; rx_ring->ch = iter; } ch = iter; break; } } if (!ch) return -EINVAL; /* Guarantee all rings are updated before we update the * MAC address filter. 
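	 * The wmb() that follows orders the ring->ch pointer writes above
	 * against the MAC filter add below, so the data path cannot see the
	 * new filter before the rings point at the channel.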
*/ wmb(); /* Add a mac filter */ ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err); if (ret) { /* if we cannot add the MAC rule then disable the offload */ macvlan_release_l2fw_offload(vdev); for (i = 0; i < ch->num_queue_pairs; i++) { struct i40e_ring *rx_ring; u16 pf_q; pf_q = ch->base_queue + i; rx_ring = vsi->rx_rings[pf_q]; rx_ring->netdev = NULL; } dev_info(&pf->pdev->dev, "Error adding mac filter on macvlan err %pe, aq_err %s\n", ERR_PTR(ret), i40e_aq_str(hw, aq_err)); netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n"); } return ret; } /** * i40e_setup_macvlans - create the channels which will be macvlans * @vsi: the VSI we want to access * @macvlan_cnt: no. of macvlans to be setup * @qcnt: no. of Qs per macvlan * @vdev: macvlan netdevice */ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt, struct net_device *vdev) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_vsi_context ctxt; u16 sections, qmap, num_qps; struct i40e_channel *ch; int i, pow, ret = 0; u8 offset = 0; if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt) return -EINVAL; num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt); /* find the next higher power-of-2 of num queue pairs */ pow = fls(roundup_pow_of_two(num_qps) - 1); qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); /* Setup context bits for the main VSI */ sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; sections |= I40E_AQ_VSI_PROP_SCHED_VALID; memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = vsi->seid; ctxt.pf_num = vsi->back->hw.pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; ctxt.info = vsi->info; ctxt.info.tc_mapping[0] = cpu_to_le16(qmap); ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); ctxt.info.valid_sections |= cpu_to_le16(sections); /* Reconfigure RSS for main VSI with new max queue count */ vsi->rss_size = max_t(u16, num_qps, qcnt); ret = i40e_vsi_config_rss(vsi); if (ret) { dev_info(&pf->pdev->dev, "Failed to reconfig RSS for num_queues (%u)\n", vsi->rss_size); return ret; } vsi->reconfig_rss = true; dev_dbg(&vsi->back->pdev->dev, "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size); vsi->next_base_queue = num_qps; vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps; /* Update the VSI after updating the VSI queue-mapping * information */ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "Update vsi tc config failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; } /* update the local VSI info with updated queue map */ i40e_vsi_update_queue_map(vsi, &ctxt); vsi->info.valid_sections = 0; /* Create channels for macvlans */ INIT_LIST_HEAD(&vsi->macvlan_list); for (i = 0; i < macvlan_cnt; i++) { ch = kzalloc(sizeof(*ch), GFP_KERNEL); if (!ch) { ret = -ENOMEM; goto err_free; } INIT_LIST_HEAD(&ch->list); ch->num_queue_pairs = qcnt; if (!i40e_setup_channel(pf, vsi, ch)) { ret = -EINVAL; kfree(ch); goto err_free; } ch->parent_vsi = vsi; vsi->cnt_q_avail -= ch->num_queue_pairs; vsi->macvlan_cnt++; list_add_tail(&ch->list, &vsi->macvlan_list); } return ret; err_free: dev_info(&pf->pdev->dev, "Failed to setup macvlans\n"); i40e_free_macvlan_channels(vsi); return ret; } /** * i40e_fwd_add - configure macvlans * @netdev: net device to configure * @vdev: macvlan netdevice **/ static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev) { struct 
i40e_netdev_priv *np = netdev_priv(netdev); u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_fwd_adapter *fwd; int avail_macvlan, ret; if ((pf->flags & I40E_FLAG_DCB_ENABLED)) { netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n"); return ERR_PTR(-EINVAL); } if (i40e_is_tc_mqprio_enabled(pf)) { netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n"); return ERR_PTR(-EINVAL); } if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) { netdev_info(netdev, "Not enough vectors available to support macvlans\n"); return ERR_PTR(-EINVAL); } /* The macvlan device has to be a single Q device so that the * tc_to_txq field can be reused to pick the tx queue. */ if (netif_is_multiqueue(vdev)) return ERR_PTR(-ERANGE); if (!vsi->macvlan_cnt) { /* reserve bit 0 for the pf device */ set_bit(0, vsi->fwd_bitmask); /* Try to reserve as many queues as possible for macvlans. First * reserve 3/4th of max vectors, then half, then quarter and * calculate Qs per macvlan as you go */ vectors = pf->num_lan_msix; if (vectors <= I40E_MAX_MACVLANS && vectors > 64) { /* allocate 4 Qs per macvlan and 32 Qs to the PF*/ q_per_macvlan = 4; macvlan_cnt = (vectors - 32) / 4; } else if (vectors <= 64 && vectors > 32) { /* allocate 2 Qs per macvlan and 16 Qs to the PF*/ q_per_macvlan = 2; macvlan_cnt = (vectors - 16) / 2; } else if (vectors <= 32 && vectors > 16) { /* allocate 1 Q per macvlan and 16 Qs to the PF*/ q_per_macvlan = 1; macvlan_cnt = vectors - 16; } else if (vectors <= 16 && vectors > 8) { /* allocate 1 Q per macvlan and 8 Qs to the PF */ q_per_macvlan = 1; macvlan_cnt = vectors - 8; } else { /* allocate 1 Q per macvlan and 1 Q to the PF */ q_per_macvlan = 1; macvlan_cnt = vectors - 1; } if (macvlan_cnt == 0) return ERR_PTR(-EBUSY); /* Quiesce VSI queues */ i40e_quiesce_vsi(vsi); /* sets up the macvlans but does not "enable" them */ ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan, vdev); if (ret) return ERR_PTR(ret); /* Unquiesce VSI */ i40e_unquiesce_vsi(vsi); } avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask, vsi->macvlan_cnt); if (avail_macvlan >= I40E_MAX_MACVLANS) return ERR_PTR(-EBUSY); /* create the fwd struct */ fwd = kzalloc(sizeof(*fwd), GFP_KERNEL); if (!fwd) return ERR_PTR(-ENOMEM); set_bit(avail_macvlan, vsi->fwd_bitmask); fwd->bit_no = avail_macvlan; netdev_set_sb_channel(vdev, avail_macvlan); fwd->netdev = vdev; if (!netif_running(netdev)) return fwd; /* Set fwd ring up */ ret = i40e_fwd_ring_up(vsi, vdev, fwd); if (ret) { /* unbind the queues and drop the subordinate channel config */ netdev_unbind_sb_channel(netdev, vdev); netdev_set_sb_channel(vdev, 0); kfree(fwd); return ERR_PTR(-EINVAL); } return fwd; } /** * i40e_del_all_macvlans - Delete all the mac filters on the channels * @vsi: the VSI we want to access */ static void i40e_del_all_macvlans(struct i40e_vsi *vsi) { struct i40e_channel *ch, *ch_tmp; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int aq_err, ret = 0; if (list_empty(&vsi->macvlan_list)) return; list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { if (i40e_is_channel_macvlan(ch)) { ret = i40e_del_macvlan_filter(hw, ch->seid, i40e_channel_mac(ch), &aq_err); if (!ret) { /* Reset queue contexts */ i40e_reset_ch_rings(vsi, ch); clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev); netdev_set_sb_channel(ch->fwd->netdev, 0); kfree(ch->fwd); ch->fwd = NULL; } } } } /** * i40e_fwd_del - 
delete macvlan interfaces * @netdev: net device to configure * @vdev: macvlan netdevice */ static void i40e_fwd_del(struct net_device *netdev, void *vdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_fwd_adapter *fwd = vdev; struct i40e_channel *ch, *ch_tmp; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int aq_err, ret = 0; /* Find the channel associated with the macvlan and del mac filter */ list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { if (i40e_is_channel_macvlan(ch) && ether_addr_equal(i40e_channel_mac(ch), fwd->netdev->dev_addr)) { ret = i40e_del_macvlan_filter(hw, ch->seid, i40e_channel_mac(ch), &aq_err); if (!ret) { /* Reset queue contexts */ i40e_reset_ch_rings(vsi, ch); clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); netdev_unbind_sb_channel(netdev, fwd->netdev); netdev_set_sb_channel(fwd->netdev, 0); kfree(ch->fwd); ch->fwd = NULL; } else { dev_info(&pf->pdev->dev, "Error deleting mac filter on macvlan err %pe, aq_err %s\n", ERR_PTR(ret), i40e_aq_str(hw, aq_err)); } break; } } } /** * i40e_setup_tc - configure multiple traffic classes * @netdev: net device to configure * @type_data: tc offload data **/ static int i40e_setup_tc(struct net_device *netdev, void *type_data) { struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u8 enabled_tc = 0, num_tc, hw; bool need_reset = false; int old_queue_pairs; int ret = -EINVAL; u16 mode; int i; old_queue_pairs = vsi->num_queue_pairs; num_tc = mqprio_qopt->qopt.num_tc; hw = mqprio_qopt->qopt.hw; mode = mqprio_qopt->mode; if (!hw) { pf->flags &= ~I40E_FLAG_TC_MQPRIO; memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); goto config_tc; } /* Check if MFP enabled */ if (pf->flags & I40E_FLAG_MFP_ENABLED) { netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); return ret; } switch (mode) { case TC_MQPRIO_MODE_DCB: pf->flags &= ~I40E_FLAG_TC_MQPRIO; /* Check if DCB enabled to continue */ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { netdev_info(netdev, "DCB is not enabled for adapter\n"); return ret; } /* Check whether tc count is within enabled limit */ if (num_tc > i40e_pf_get_num_tc(pf)) { netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); return ret; } break; case TC_MQPRIO_MODE_CHANNEL: if (pf->flags & I40E_FLAG_DCB_ENABLED) { netdev_info(netdev, "Full offload of TC Mqprio options is not supported when DCB is enabled\n"); return ret; } if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) return ret; ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt); if (ret) return ret; memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); pf->flags |= I40E_FLAG_TC_MQPRIO; pf->flags &= ~I40E_FLAG_DCB_ENABLED; break; default: return -EINVAL; } config_tc: /* Generate TC map for number of tc requested */ for (i = 0; i < num_tc; i++) enabled_tc |= BIT(i); /* Requesting same TC configuration as already enabled */ if (enabled_tc == vsi->tc_config.enabled_tc && mode != TC_MQPRIO_MODE_CHANNEL) return 0; /* Quiesce VSI queues */ i40e_quiesce_vsi(vsi); if (!hw && !i40e_is_tc_mqprio_enabled(pf)) i40e_remove_queue_channels(vsi); /* Configure VSI for enabled TCs */ ret = i40e_vsi_config_tc(vsi, enabled_tc); if (ret) { netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", vsi->seid); need_reset = true; goto exit; } else if (enabled_tc && (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) { netdev_info(netdev, "Failed to 
create channel. Override queues (%u) not power of 2\n", vsi->tc_config.tc_info[0].qcount); ret = -EINVAL; need_reset = true; goto exit; } dev_info(&vsi->back->pdev->dev, "Setup channel (id:%u) utilizing num_queues %d\n", vsi->seid, vsi->tc_config.tc_info[0].qcount); if (i40e_is_tc_mqprio_enabled(pf)) { if (vsi->mqprio_qopt.max_rate[0]) { u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, vsi->mqprio_qopt.max_rate[0]); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (!ret) { u64 credits = max_tx_rate; do_div(credits, I40E_BW_CREDIT_DIVISOR); dev_dbg(&vsi->back->pdev->dev, "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", max_tx_rate, credits, vsi->seid); } else { need_reset = true; goto exit; } } ret = i40e_configure_queue_channels(vsi); if (ret) { vsi->num_queue_pairs = old_queue_pairs; netdev_info(netdev, "Failed configuring queue channels\n"); need_reset = true; goto exit; } } exit: /* Reset the configuration data to defaults, only TC0 is enabled */ if (need_reset) { i40e_vsi_set_default_tc_config(vsi); need_reset = false; } /* Unquiesce VSI */ i40e_unquiesce_vsi(vsi); return ret; } /** * i40e_set_cld_element - sets cloud filter element data * @filter: cloud filter rule * @cld: ptr to cloud filter element data * * This is helper function to copy data into cloud filter element **/ static inline void i40e_set_cld_element(struct i40e_cloud_filter *filter, struct i40e_aqc_cloud_filters_element_data *cld) { u32 ipa; int i; memset(cld, 0, sizeof(*cld)); ether_addr_copy(cld->outer_mac, filter->dst_mac); ether_addr_copy(cld->inner_mac, filter->src_mac); if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6) return; if (filter->n_proto == ETH_P_IPV6) { #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1) for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) { ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]); *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa); } } else { ipa = be32_to_cpu(filter->dst_ipv4); memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa)); } cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id)); /* tenant_id is not supported by FW now, once the support is enabled * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id) */ if (filter->tenant_id) return; } /** * i40e_add_del_cloud_filter - Add/del cloud filter * @vsi: pointer to VSI * @filter: cloud filter rule * @add: if true, add, if false, delete * * Add or delete a cloud filter for a specific flow spec. * Returns 0 if the filter were successfully added. 
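 *
 * Illustrative usage only (a sketch, not the exact driver flow): a caller
 * holding a parsed filter adds and later removes it with the same helper,
 *
 *   err = i40e_add_del_cloud_filter(vsi, filter, true);
 *   if (!err)
 *           err = i40e_add_del_cloud_filter(vsi, filter, false);
 *
 * Filters that match on an L4 destination port are sent through
 * i40e_add_del_cloud_filter_big_buf() instead, as i40e_configure_clsflower()
 * does.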
**/ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, struct i40e_cloud_filter *filter, bool add) { struct i40e_aqc_cloud_filters_element_data cld_filter; struct i40e_pf *pf = vsi->back; int ret; static const u16 flag_table[128] = { [I40E_CLOUD_FILTER_FLAGS_OMAC] = I40E_AQC_ADD_CLOUD_FILTER_OMAC, [I40E_CLOUD_FILTER_FLAGS_IMAC] = I40E_AQC_ADD_CLOUD_FILTER_IMAC, [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN, [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID, [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC, [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID, [I40E_CLOUD_FILTER_FLAGS_IIP] = I40E_AQC_ADD_CLOUD_FILTER_IIP, }; if (filter->flags >= ARRAY_SIZE(flag_table)) return -EIO; memset(&cld_filter, 0, sizeof(cld_filter)); /* copy element needed to add cloud filter from filter */ i40e_set_cld_element(filter, &cld_filter); if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE) cld_filter.flags = cpu_to_le16(filter->tunnel_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT); if (filter->n_proto == ETH_P_IPV6) cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | I40E_AQC_ADD_CLOUD_FLAGS_IPV6); else cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | I40E_AQC_ADD_CLOUD_FLAGS_IPV4); if (add) ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid, &cld_filter, 1); else ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid, &cld_filter, 1); if (ret) dev_dbg(&pf->pdev->dev, "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n", add ? "add" : "delete", filter->dst_port, ret, pf->hw.aq.asq_last_status); else dev_info(&pf->pdev->dev, "%s cloud filter for VSI: %d\n", add ? "Added" : "Deleted", filter->seid); return ret; } /** * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf * @vsi: pointer to VSI * @filter: cloud filter rule * @add: if true, add, if false, delete * * Add or delete a cloud filter for a specific flow spec using big buffer. * Returns 0 if the filter were successfully added. **/ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, struct i40e_cloud_filter *filter, bool add) { struct i40e_aqc_cloud_filters_element_bb cld_filter; struct i40e_pf *pf = vsi->back; int ret; /* Both (src/dst) valid mac_addr are not supported */ if ((is_valid_ether_addr(filter->dst_mac) && is_valid_ether_addr(filter->src_mac)) || (is_multicast_ether_addr(filter->dst_mac) && is_multicast_ether_addr(filter->src_mac))) return -EOPNOTSUPP; /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP * ports are not supported via big buffer now. 
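	 * In practice only TCP filters with a non-zero destination port get
	 * past the check below; everything else is rejected with
	 * -EOPNOTSUPP.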
*/ if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP) return -EOPNOTSUPP; /* adding filter using src_port/src_ip is not supported at this stage */ if (filter->src_port || (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) || !ipv6_addr_any(&filter->ip.v6.src_ip6)) return -EOPNOTSUPP; memset(&cld_filter, 0, sizeof(cld_filter)); /* copy element needed to add cloud filter from filter */ i40e_set_cld_element(filter, &cld_filter.element); if (is_valid_ether_addr(filter->dst_mac) || is_valid_ether_addr(filter->src_mac) || is_multicast_ether_addr(filter->dst_mac) || is_multicast_ether_addr(filter->src_mac)) { /* MAC + IP : unsupported mode */ if (filter->dst_ipv4) return -EOPNOTSUPP; /* since we validated that L4 port must be valid before * we get here, start with respective "flags" value * and update if vlan is present or not */ cld_filter.element.flags = cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT); if (filter->vlan_id) { cld_filter.element.flags = cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT); } } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) || !ipv6_addr_any(&filter->ip.v6.dst_ip6)) { cld_filter.element.flags = cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT); if (filter->n_proto == ETH_P_IPV6) cld_filter.element.flags |= cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6); else cld_filter.element.flags |= cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4); } else { dev_err(&pf->pdev->dev, "either mac or ip has to be valid for cloud filter\n"); return -EINVAL; } /* Now copy L4 port in Byte 6..7 in general fields */ cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] = be16_to_cpu(filter->dst_port); if (add) { /* Validate current device switch mode, change if necessary */ ret = i40e_validate_and_set_switch_mode(vsi); if (ret) { dev_err(&pf->pdev->dev, "failed to set switch mode, ret %d\n", ret); return ret; } ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid, &cld_filter, 1); } else { ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid, &cld_filter, 1); } if (ret) dev_dbg(&pf->pdev->dev, "Failed to %s cloud filter(big buffer) err %d aq_err %d\n", add ? "add" : "delete", ret, pf->hw.aq.asq_last_status); else dev_info(&pf->pdev->dev, "%s cloud filter for VSI: %d, L4 port: %d\n", add ? 
"add" : "delete", filter->seid, ntohs(filter->dst_port)); return ret; } /** * i40e_parse_cls_flower - Parse tc flower filters provided by kernel * @vsi: Pointer to VSI * @f: Pointer to struct flow_cls_offload * @filter: Pointer to cloud filter structure * **/ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, struct flow_cls_offload *f, struct i40e_cloud_filter *filter) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_dissector *dissector = rule->match.dissector; u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0; struct i40e_pf *pf = vsi->back; u8 field_flags = 0; if (dissector->used_keys & ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) { dev_err(&pf->pdev->dev, "Unsupported key used: 0x%llx\n", dissector->used_keys); return -EOPNOTSUPP; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { struct flow_match_enc_keyid match; flow_rule_match_enc_keyid(rule, &match); if (match.mask->keyid != 0) field_flags |= I40E_CLOUD_FIELD_TEN_ID; filter->tenant_id = be32_to_cpu(match.key->keyid); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; flow_rule_match_basic(rule, &match); n_proto_key = ntohs(match.key->n_proto); n_proto_mask = ntohs(match.mask->n_proto); if (n_proto_key == ETH_P_ALL) { n_proto_key = 0; n_proto_mask = 0; } filter->n_proto = n_proto_key & n_proto_mask; filter->ip_proto = match.key->ip_proto; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_match_eth_addrs match; flow_rule_match_eth_addrs(rule, &match); /* use is_broadcast and is_zero to check for all 0xf or 0 */ if (!is_zero_ether_addr(match.mask->dst)) { if (is_broadcast_ether_addr(match.mask->dst)) { field_flags |= I40E_CLOUD_FIELD_OMAC; } else { dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n", match.mask->dst); return -EIO; } } if (!is_zero_ether_addr(match.mask->src)) { if (is_broadcast_ether_addr(match.mask->src)) { field_flags |= I40E_CLOUD_FIELD_IMAC; } else { dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n", match.mask->src); return -EIO; } } ether_addr_copy(filter->dst_mac, match.key->dst); ether_addr_copy(filter->src_mac, match.key->src); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { struct flow_match_vlan match; flow_rule_match_vlan(rule, &match); if (match.mask->vlan_id) { if (match.mask->vlan_id == VLAN_VID_MASK) { field_flags |= I40E_CLOUD_FIELD_IVLAN; } else { dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n", match.mask->vlan_id); return -EIO; } } filter->vlan_id = cpu_to_be16(match.key->vlan_id); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { struct flow_match_control match; flow_rule_match_control(rule, &match); addr_type = match.key->addr_type; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { struct flow_match_ipv4_addrs match; flow_rule_match_ipv4_addrs(rule, &match); if (match.mask->dst) { if (match.mask->dst == cpu_to_be32(0xffffffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n", &match.mask->dst); return -EIO; } } if (match.mask->src) { if (match.mask->src == cpu_to_be32(0xffffffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n", &match.mask->src); return -EIO; } } if (field_flags & 
I40E_CLOUD_FIELD_TEN_ID) { dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n"); return -EIO; } filter->dst_ipv4 = match.key->dst; filter->src_ipv4 = match.key->src; } if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { struct flow_match_ipv6_addrs match; flow_rule_match_ipv6_addrs(rule, &match); /* src and dest IPV6 address should not be LOOPBACK * (0:0:0:0:0:0:0:1), which can be represented as ::1 */ if (ipv6_addr_loopback(&match.key->dst) || ipv6_addr_loopback(&match.key->src)) { dev_err(&pf->pdev->dev, "Bad ipv6, addr is LOOPBACK\n"); return -EIO; } if (!ipv6_addr_any(&match.mask->dst) || !ipv6_addr_any(&match.mask->src)) field_flags |= I40E_CLOUD_FIELD_IIP; memcpy(&filter->src_ipv6, &match.key->src.s6_addr32, sizeof(filter->src_ipv6)); memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32, sizeof(filter->dst_ipv6)); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports match; flow_rule_match_ports(rule, &match); if (match.mask->src) { if (match.mask->src == cpu_to_be16(0xffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n", be16_to_cpu(match.mask->src)); return -EIO; } } if (match.mask->dst) { if (match.mask->dst == cpu_to_be16(0xffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n", be16_to_cpu(match.mask->dst)); return -EIO; } } filter->dst_port = match.key->dst; filter->src_port = match.key->src; switch (filter->ip_proto) { case IPPROTO_TCP: case IPPROTO_UDP: break; default: dev_err(&pf->pdev->dev, "Only UDP and TCP transport are supported\n"); return -EINVAL; } } filter->flags = field_flags; return 0; } /** * i40e_handle_tclass: Forward to a traffic class on the device * @vsi: Pointer to VSI * @tc: traffic class index on the device * @filter: Pointer to cloud filter structure * **/ static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc, struct i40e_cloud_filter *filter) { struct i40e_channel *ch, *ch_tmp; /* direct to a traffic class on the same device */ if (tc == 0) { filter->seid = vsi->seid; return 0; } else if (vsi->tc_config.enabled_tc & BIT(tc)) { if (!filter->dst_port) { dev_err(&vsi->back->pdev->dev, "Specify destination port to direct to traffic class that is not default\n"); return -EINVAL; } if (list_empty(&vsi->ch_list)) return -EINVAL; list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { if (ch->seid == vsi->tc_seid_map[tc]) filter->seid = ch->seid; } return 0; } dev_err(&vsi->back->pdev->dev, "TC is not enabled\n"); return -EINVAL; } /** * i40e_configure_clsflower - Configure tc flower filters * @vsi: Pointer to VSI * @cls_flower: Pointer to struct flow_cls_offload * **/ static int i40e_configure_clsflower(struct i40e_vsi *vsi, struct flow_cls_offload *cls_flower) { int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid); struct i40e_cloud_filter *filter = NULL; struct i40e_pf *pf = vsi->back; int err = 0; if (tc < 0) { dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n"); return -EOPNOTSUPP; } if (!tc) { dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination"); return -EINVAL; } if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) return -EBUSY; if (pf->fdir_pf_active_filters || (!hlist_empty(&pf->fdir_filter_list))) { dev_err(&vsi->back->pdev->dev, "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n"); return -EINVAL; } if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) { 
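		/* Flow Director sideband and tc-flower cloud filters are
		 * mutually exclusive here: sideband is switched off and
		 * remembered via I40E_FLAG_FD_SB_TO_CLOUD_FILTER so it can be
		 * re-enabled once the last cloud filter goes away (see
		 * i40e_delete_clsflower() and i40e_cloud_filter_exit()).
		 */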
		dev_err(&vsi->back->pdev->dev,
			"Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
		vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
	}

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	filter->cookie = cls_flower->cookie;

	err = i40e_parse_cls_flower(vsi, cls_flower, filter);
	if (err < 0)
		goto err;

	err = i40e_handle_tclass(vsi, tc, filter);
	if (err < 0)
		goto err;

	/* Add cloud filter */
	if (filter->dst_port)
		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
	else
		err = i40e_add_del_cloud_filter(vsi, filter, true);

	if (err) {
		dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
			err);
		goto err;
	}

	/* add filter to the ordered list */
	INIT_HLIST_NODE(&filter->cloud_node);

	hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);

	pf->num_cloud_filters++;

	return err;
err:
	kfree(filter);
	return err;
}

/**
 * i40e_find_cloud_filter - Find the cloud filter in the list
 * @vsi: Pointer to VSI
 * @cookie: filter specific cookie
 *
 **/
static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
							unsigned long *cookie)
{
	struct i40e_cloud_filter *filter = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &vsi->back->cloud_filter_list, cloud_node)
		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
			return filter;
	return NULL;
}

/**
 * i40e_delete_clsflower - Remove tc flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct flow_cls_offload
 *
 **/
static int i40e_delete_clsflower(struct i40e_vsi *vsi,
				 struct flow_cls_offload *cls_flower)
{
	struct i40e_cloud_filter *filter = NULL;
	struct i40e_pf *pf = vsi->back;
	int err = 0;

	filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
	if (!filter)
		return -EINVAL;

	hash_del(&filter->cloud_node);

	if (filter->dst_port)
		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
	else
		err = i40e_add_del_cloud_filter(vsi, filter, false);

	kfree(filter);
	if (err) {
		dev_err(&pf->pdev->dev,
			"Failed to delete cloud filter, err %pe\n",
			ERR_PTR(err));
		return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
	}

	pf->num_cloud_filters--;
	if (!pf->num_cloud_filters)
		if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
		    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
			pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
		}
	return 0;
}

/**
 * i40e_setup_tc_cls_flower - flower classifier offloads
 * @np: net device to configure
 * @cls_flower: offload data
 **/
static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
				    struct flow_cls_offload *cls_flower)
{
	struct i40e_vsi *vsi = np->vsi;

	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return i40e_configure_clsflower(vsi, cls_flower);
	case FLOW_CLS_DESTROY:
		return i40e_delete_clsflower(vsi, cls_flower);
	case FLOW_CLS_STATS:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct i40e_netdev_priv *np = cb_priv;

	if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return i40e_setup_tc_cls_flower(np, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(i40e_block_cb_list);

static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			   void *type_data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return i40e_setup_tc(netdev, type_data);
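	/* TC_SETUP_BLOCK wires up the flower classifier: blocks register on
	 * i40e_block_cb_list and each classifier request is dispatched via
	 * i40e_setup_tc_block_cb() to i40e_setup_tc_cls_flower().
	 */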
case TC_SETUP_BLOCK: return flow_block_cb_setup_simple(type_data, &i40e_block_cb_list, i40e_setup_tc_block_cb, np, np, true); default: return -EOPNOTSUPP; } } /** * i40e_open - Called when a network interface is made active * @netdev: network interface device structure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the netdev watchdog subtask is * enabled, and the stack is notified that the interface is ready. * * Returns 0 on success, negative value on failure **/ int i40e_open(struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; int err; /* disallow open during test or if eeprom is broken */ if (test_bit(__I40E_TESTING, pf->state) || test_bit(__I40E_BAD_EEPROM, pf->state)) return -EBUSY; netif_carrier_off(netdev); if (i40e_force_link_state(pf, true)) return -EAGAIN; err = i40e_vsi_open(vsi); if (err) return err; /* configure global TSO hardware offload settings */ wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | TCP_FLAG_FIN) >> 16); wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | TCP_FLAG_FIN | TCP_FLAG_CWR) >> 16); wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); udp_tunnel_get_rx_info(netdev); return 0; } /** * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues * @vsi: vsi structure * * This updates netdev's number of tx/rx queues * * Returns status of setting tx/rx queues **/ static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi) { int ret; ret = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs); if (ret) return ret; return netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs); } /** * i40e_vsi_open - * @vsi: the VSI to open * * Finish initialization of the VSI. * * Returns 0 on success, negative value on failure * * Note: expects to be called while under rtnl_lock() **/ int i40e_vsi_open(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; char int_name[I40E_INT_NAME_STR_LEN]; int err; /* allocate descriptors */ err = i40e_vsi_setup_tx_resources(vsi); if (err) goto err_setup_tx; err = i40e_vsi_setup_rx_resources(vsi); if (err) goto err_setup_rx; err = i40e_vsi_configure(vsi); if (err) goto err_setup_rx; if (vsi->netdev) { snprintf(int_name, sizeof(int_name) - 1, "%s-%s", dev_driver_string(&pf->pdev->dev), vsi->netdev->name); err = i40e_vsi_request_irq(vsi, int_name); if (err) goto err_setup_rx; /* Notify the stack of the actual queue counts. */ err = i40e_netif_set_realnum_tx_rx_queues(vsi); if (err) goto err_set_queues; } else if (vsi->type == I40E_VSI_FDIR) { snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir", dev_driver_string(&pf->pdev->dev), dev_name(&pf->pdev->dev)); err = i40e_vsi_request_irq(vsi, int_name); if (err) goto err_setup_rx; } else { err = -EINVAL; goto err_setup_rx; } err = i40e_up_complete(vsi); if (err) goto err_up_complete; return 0; err_up_complete: i40e_down(vsi); err_set_queues: i40e_vsi_free_irq(vsi); err_setup_rx: i40e_vsi_free_rx_resources(vsi); err_setup_tx: i40e_vsi_free_tx_resources(vsi); if (vsi == pf->vsi[pf->lan_vsi]) i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); return err; } /** * i40e_fdir_filter_exit - Cleans up the Flow Director accounting * @pf: Pointer to PF * * This function destroys the hlist where all the Flow Director * filters were saved. 
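 * It also releases the L3/L4 flexible PIT entries and restores the default
 * Flow Director input set masks for the supported flow types.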
**/ static void i40e_fdir_filter_exit(struct i40e_pf *pf) { struct i40e_fdir_filter *filter; struct i40e_flex_pit *pit_entry, *tmp; struct hlist_node *node2; hlist_for_each_entry_safe(filter, node2, &pf->fdir_filter_list, fdir_node) { hlist_del(&filter->fdir_node); kfree(filter); } list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) { list_del(&pit_entry->list); kfree(pit_entry); } INIT_LIST_HEAD(&pf->l3_flex_pit_list); list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) { list_del(&pit_entry->list); kfree(pit_entry); } INIT_LIST_HEAD(&pf->l4_flex_pit_list); pf->fdir_pf_active_filters = 0; i40e_reset_fdir_filter_cnt(pf); /* Reprogram the default input set for TCP/IPv4 */ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP, I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for TCP/IPv6 */ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP, I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for UDP/IPv4 */ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP, I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for UDP/IPv6 */ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP, I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for SCTP/IPv4 */ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP, I40E_L3_SRC_MASK | I40E_L3_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for SCTP/IPv6 */ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP, I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK); /* Reprogram the default input set for Other/IPv4 */ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, I40E_L3_SRC_MASK | I40E_L3_DST_MASK); i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4, I40E_L3_SRC_MASK | I40E_L3_DST_MASK); /* Reprogram the default input set for Other/IPv6 */ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER, I40E_L3_SRC_MASK | I40E_L3_DST_MASK); i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6, I40E_L3_SRC_MASK | I40E_L3_DST_MASK); } /** * i40e_cloud_filter_exit - Cleans up the cloud filters * @pf: Pointer to PF * * This function destroys the hlist where all the cloud filters * were saved. **/ static void i40e_cloud_filter_exit(struct i40e_pf *pf) { struct i40e_cloud_filter *cfilter; struct hlist_node *node; hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, cloud_node) { hlist_del(&cfilter->cloud_node); kfree(cfilter); } pf->num_cloud_filters = 0; if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { pf->flags |= I40E_FLAG_FD_SB_ENABLED; pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; } } /** * i40e_close - Disables a network interface * @netdev: network interface device structure * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the driver's control, but * this netdev interface is disabled. 
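 * The VSI is quiesced and its rings and interrupts are released through
 * i40e_vsi_close().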
* * Returns 0, this is not allowed to fail **/ int i40e_close(struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; i40e_vsi_close(vsi); return 0; } /** * i40e_do_reset - Start a PF or Core Reset sequence * @pf: board private structure * @reset_flags: which reset is requested * @lock_acquired: indicates whether or not the lock has been acquired * before this function was called. * * The essential difference in resets is that the PF Reset * doesn't clear the packet buffers, doesn't reset the PE * firmware, and doesn't bother the other PFs on the chip. **/ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) { u32 val; /* do the biggest reset indicated */ if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { /* Request a Global Reset * * This will start the chip's countdown to the actual full * chip reset event, and a warning interrupt to be sent * to all PFs, including the requestor. Our handler * for the warning interrupt will deal with the shutdown * and recovery of the switch setup. */ dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); val = rd32(&pf->hw, I40E_GLGEN_RTRIG); val |= I40E_GLGEN_RTRIG_GLOBR_MASK; wr32(&pf->hw, I40E_GLGEN_RTRIG, val); } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) { /* Request a Core Reset * * Same as Global Reset, except does *not* include the MAC/PHY */ dev_dbg(&pf->pdev->dev, "CoreR requested\n"); val = rd32(&pf->hw, I40E_GLGEN_RTRIG); val |= I40E_GLGEN_RTRIG_CORER_MASK; wr32(&pf->hw, I40E_GLGEN_RTRIG, val); i40e_flush(&pf->hw); } else if (reset_flags & I40E_PF_RESET_FLAG) { /* Request a PF Reset * * Resets only the PF-specific registers * * This goes directly to the tear-down and rebuild of * the switch, since we need to do all the recovery as * for the Core Reset. */ dev_dbg(&pf->pdev->dev, "PFR requested\n"); i40e_handle_reset_warning(pf, lock_acquired); } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) { /* Request a PF Reset * * Resets PF and reinitializes PFs VSI. */ i40e_prep_for_reset(pf); i40e_reset_and_rebuild(pf, true, lock_acquired); dev_info(&pf->pdev->dev, pf->flags & I40E_FLAG_DISABLE_FW_LLDP ? 
"FW LLDP is disabled\n" : "FW LLDP is enabled\n"); } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { int v; /* Find the VSI(s) that requested a re-init */ dev_info(&pf->pdev->dev, "VSI reinit requested\n"); for (v = 0; v < pf->num_alloc_vsi; v++) { struct i40e_vsi *vsi = pf->vsi[v]; if (vsi != NULL && test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED, vsi->state)) i40e_vsi_reinit_locked(pf->vsi[v]); } } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) { int v; /* Find the VSI(s) that needs to be brought down */ dev_info(&pf->pdev->dev, "VSI down requested\n"); for (v = 0; v < pf->num_alloc_vsi; v++) { struct i40e_vsi *vsi = pf->vsi[v]; if (vsi != NULL && test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state)) { set_bit(__I40E_VSI_DOWN, vsi->state); i40e_down(vsi); } } } else { dev_info(&pf->pdev->dev, "bad reset request 0x%08x\n", reset_flags); } } #ifdef CONFIG_I40E_DCB /** * i40e_dcb_need_reconfig - Check if DCB needs reconfig * @pf: board private structure * @old_cfg: current DCB config * @new_cfg: new DCB config **/ bool i40e_dcb_need_reconfig(struct i40e_pf *pf, struct i40e_dcbx_config *old_cfg, struct i40e_dcbx_config *new_cfg) { bool need_reconfig = false; /* Check if ETS configuration has changed */ if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg, sizeof(new_cfg->etscfg))) { /* If Priority Table has changed reconfig is needed */ if (memcmp(&new_cfg->etscfg.prioritytable, &old_cfg->etscfg.prioritytable, sizeof(new_cfg->etscfg.prioritytable))) { need_reconfig = true; dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); } if (memcmp(&new_cfg->etscfg.tcbwtable, &old_cfg->etscfg.tcbwtable, sizeof(new_cfg->etscfg.tcbwtable))) dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); if (memcmp(&new_cfg->etscfg.tsatable, &old_cfg->etscfg.tsatable, sizeof(new_cfg->etscfg.tsatable))) dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); } /* Check if PFC configuration has changed */ if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) { need_reconfig = true; dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); } /* Check if APP Table has changed */ if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) { need_reconfig = true; dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); } dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); return need_reconfig; } /** * i40e_handle_lldp_event - Handle LLDP Change MIB event * @pf: board private structure * @e: event info posted on ARQ **/ static int i40e_handle_lldp_event(struct i40e_pf *pf, struct i40e_arq_event_info *e) { struct i40e_aqc_lldp_get_mib *mib = (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; struct i40e_hw *hw = &pf->hw; struct i40e_dcbx_config tmp_dcbx_cfg; bool need_reconfig = false; int ret = 0; u8 type; /* X710-T*L 2.5G and 5G speeds don't support DCB */ if (I40E_IS_X710TL_DEVICE(hw->device_id) && (hw->phy.link_info.link_speed & ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) && !(pf->flags & I40E_FLAG_DCB_CAPABLE)) /* let firmware decide if the DCB should be disabled */ pf->flags |= I40E_FLAG_DCB_CAPABLE; /* Not DCB capable or capability disabled */ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) return ret; /* Ignore if event is not for Nearest Bridge */ type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type); if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) return ret; /* Check MIB Type and return if event for Remote MIB update */ type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; 
dev_dbg(&pf->pdev->dev, "LLDP event mib type %s\n", type ? "remote" : "local"); if (type == I40E_AQ_LLDP_MIB_REMOTE) { /* Update the remote cached instance and return */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, &hw->remote_dcbx_config); goto exit; } /* Store the old configuration */ tmp_dcbx_cfg = hw->local_dcbx_config; /* Reset the old DCBx configuration data */ memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); /* Get updated DCBX data from firmware */ ret = i40e_get_dcb_config(&pf->hw); if (ret) { /* X710-T*L 2.5G and 5G speeds don't support DCB */ if (I40E_IS_X710TL_DEVICE(hw->device_id) && (hw->phy.link_info.link_speed & (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) { dev_warn(&pf->pdev->dev, "DCB is not supported for X710-T*L 2.5/5G speeds\n"); pf->flags &= ~I40E_FLAG_DCB_CAPABLE; } else { dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } goto exit; } /* No change detected in DCBX configs */ if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config, sizeof(tmp_dcbx_cfg))) { dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); goto exit; } need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); if (!need_reconfig) goto exit; /* Enable DCB tagging only when more than one TC */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) pf->flags |= I40E_FLAG_DCB_ENABLED; else pf->flags &= ~I40E_FLAG_DCB_ENABLED; set_bit(__I40E_PORT_SUSPENDED, pf->state); /* Reconfiguration needed quiesce all VSIs */ i40e_pf_quiesce_all_vsi(pf); /* Changes in configuration update VEB/VSI */ i40e_dcb_reconfigure(pf); ret = i40e_resume_port_tx(pf); clear_bit(__I40E_PORT_SUSPENDED, pf->state); /* In case of error no point in resuming VSIs */ if (ret) goto exit; /* Wait for the PF's queues to be disabled */ ret = i40e_pf_wait_queues_disabled(pf); if (ret) { /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); } else { i40e_pf_unquiesce_all_vsi(pf); set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); } exit: return ret; } #endif /* CONFIG_I40E_DCB */ /** * i40e_do_reset_safe - Protected reset path for userland calls. 
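 * Takes the RTNL lock around i40e_do_reset() so userland-triggered resets
 * do not race with other rtnl-protected operations.
 *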
* @pf: board private structure * @reset_flags: which reset is requested * **/ void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags) { rtnl_lock(); i40e_do_reset(pf, reset_flags, true); rtnl_unlock(); } /** * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event * @pf: board private structure * @e: event info posted on ARQ * * Handler for LAN Queue Overflow Event generated by the firmware for PF * and VF queues **/ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, struct i40e_arq_event_info *e) { struct i40e_aqc_lan_overflow *data = (struct i40e_aqc_lan_overflow *)&e->desc.params.raw; u32 queue = le32_to_cpu(data->prtdcb_rupto); u32 qtx_ctl = le32_to_cpu(data->otx_ctl); struct i40e_hw *hw = &pf->hw; struct i40e_vf *vf; u16 vf_id; dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", queue, qtx_ctl); /* Queue belongs to VF, find the VF and issue VF reset */ if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK) >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) { vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK) >> I40E_QTX_CTL_VFVM_INDX_SHIFT); vf_id -= hw->func_caps.vf_base_id; vf = &pf->vf[vf_id]; i40e_vc_notify_vf_reset(vf); /* Allow VF to process pending reset notification */ msleep(20); i40e_reset_vf(vf, false); } } /** * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters * @pf: board private structure **/ u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) { u32 val, fcnt_prog; val = rd32(&pf->hw, I40E_PFQF_FDSTAT); fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK); return fcnt_prog; } /** * i40e_get_current_fd_count - Get total FD filters programmed for this PF * @pf: board private structure **/ u32 i40e_get_current_fd_count(struct i40e_pf *pf) { u32 val, fcnt_prog; val = rd32(&pf->hw, I40E_PFQF_FDSTAT); fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) + ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); return fcnt_prog; } /** * i40e_get_global_fd_count - Get total FD filters programmed on device * @pf: board private structure **/ u32 i40e_get_global_fd_count(struct i40e_pf *pf) { u32 val, fcnt_prog; val = rd32(&pf->hw, I40E_GLQF_FDCNT_0); fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) + ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >> I40E_GLQF_FDCNT_0_BESTCNT_SHIFT); return fcnt_prog; } /** * i40e_reenable_fdir_sb - Restore FDir SB capability * @pf: board private structure **/ static void i40e_reenable_fdir_sb(struct i40e_pf *pf) { if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); } /** * i40e_reenable_fdir_atr - Restore FDir ATR capability * @pf: board private structure **/ static void i40e_reenable_fdir_atr(struct i40e_pf *pf) { if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) { /* ATR uses the same filtering logic as SB rules. It only * functions properly if the input set mask is at the default * settings. It is safe to restore the default input set * because there are no active TCPv4 filter rules. 
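		 * Both the IPv4 and IPv6 TCP sideband filter counters are
		 * checked by i40e_fdir_check_and_reenable() before this path
		 * is taken.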
		 */
		i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
					I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
					I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
	}
}

/**
 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
 * @pf: board private structure
 * @filter: FDir filter to remove
 */
static void i40e_delete_invalid_filter(struct i40e_pf *pf,
				       struct i40e_fdir_filter *filter)
{
	/* Update counters */
	pf->fdir_pf_active_filters--;
	pf->fd_inv = 0;

	switch (filter->flow_type) {
	case TCP_V4_FLOW:
		pf->fd_tcp4_filter_cnt--;
		break;
	case UDP_V4_FLOW:
		pf->fd_udp4_filter_cnt--;
		break;
	case SCTP_V4_FLOW:
		pf->fd_sctp4_filter_cnt--;
		break;
	case TCP_V6_FLOW:
		pf->fd_tcp6_filter_cnt--;
		break;
	case UDP_V6_FLOW:
		pf->fd_udp6_filter_cnt--;
		break;
	case SCTP_V6_FLOW:
		pf->fd_sctp6_filter_cnt--;
		break;
	case IP_USER_FLOW:
		switch (filter->ipl4_proto) {
		case IPPROTO_TCP:
			pf->fd_tcp4_filter_cnt--;
			break;
		case IPPROTO_UDP:
			pf->fd_udp4_filter_cnt--;
			break;
		case IPPROTO_SCTP:
			pf->fd_sctp4_filter_cnt--;
			break;
		case IPPROTO_IP:
			pf->fd_ip4_filter_cnt--;
			break;
		}
		break;
	case IPV6_USER_FLOW:
		switch (filter->ipl4_proto) {
		case IPPROTO_TCP:
			pf->fd_tcp6_filter_cnt--;
			break;
		case IPPROTO_UDP:
			pf->fd_udp6_filter_cnt--;
			break;
		case IPPROTO_SCTP:
			pf->fd_sctp6_filter_cnt--;
			break;
		case IPPROTO_IP:
			pf->fd_ip6_filter_cnt--;
			break;
		}
		break;
	}

	/* Remove the filter from the list and free memory */
	hlist_del(&filter->fdir_node);
	kfree(filter);
}

/**
 * i40e_fdir_check_and_reenable - Function to reenable FD ATR or SB if disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		return;

	/* Check if we have enough room to re-enable FDir SB capability. */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
		i40e_reenable_fdir_sb(pf);

	/* We should wait for even more space before re-enabling ATR.
	 * Additionally, we cannot enable ATR as long as we still have TCP SB
	 * rules active.
	 */
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
	    pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
		i40e_reenable_fdir_atr(pf);

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node)
			if (filter->fd_id == pf->fd_inv)
				i40e_delete_invalid_filter(pf, filter);
	}
}

#define I40E_MIN_FD_FLUSH_INTERVAL 10
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	if (!time_after(jiffies, pf->fd_flush_timestamp +
				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If the flush is happening too quick and we have mostly SB rules we
	 * should not re-enable ATR for some time.
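	 * The back-off window used below is I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE
	 * (30) seconds.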
*/ min_flush_time = pf->fd_flush_timestamp + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ); fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; if (!(time_after(jiffies, min_flush_time)) && (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) { if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); disable_atr = true; } pf->fd_flush_timestamp = jiffies; set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); /* flush all filters */ wr32(&pf->hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); i40e_flush(&pf->hw); pf->fd_flush_cnt++; pf->fd_add_err = 0; do { /* Check FD flush status every 5-6msec */ usleep_range(5000, 6000); reg = rd32(&pf->hw, I40E_PFQF_CTL_1); if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK)) break; } while (flush_wait_retry--); if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) { dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); } else { /* replay sideband filters */ i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); if (!disable_atr && !pf->fd_tcp4_filter_cnt) clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); } } /** * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed * @pf: board private structure **/ u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) { return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; } /** * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table * @pf: board private structure **/ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) { /* if interface is down do nothing */ if (test_bit(__I40E_DOWN, pf->state)) return; if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) i40e_fdir_flush_and_replay(pf); i40e_fdir_check_and_reenable(pf); } /** * i40e_vsi_link_event - notify VSI of a link event * @vsi: vsi to be notified * @link_up: link up or down **/ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) { if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state)) return; switch (vsi->type) { case I40E_VSI_MAIN: if (!vsi->netdev || !vsi->netdev_registered) break; if (link_up) { netif_carrier_on(vsi->netdev); netif_tx_wake_all_queues(vsi->netdev); } else { netif_carrier_off(vsi->netdev); netif_tx_stop_all_queues(vsi->netdev); } break; case I40E_VSI_SRIOV: case I40E_VSI_VMDQ2: case I40E_VSI_CTRL: case I40E_VSI_IWARP: case I40E_VSI_MIRROR: default: /* there is no notification for other VSIs */ break; } } /** * i40e_veb_link_event - notify elements on the veb of a link event * @veb: veb to be notified * @link_up: link up or down **/ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) { struct i40e_pf *pf; int i; if (!veb || !veb->pf) return; pf = veb->pf; /* depth first... */ for (i = 0; i < I40E_MAX_VEB; i++) if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) i40e_veb_link_event(pf->veb[i], link_up); /* ... 
now the local VSIs */ for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) i40e_vsi_link_event(pf->vsi[i], link_up); } /** * i40e_link_event - Update netif_carrier status * @pf: board private structure **/ static void i40e_link_event(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; u8 new_link_speed, old_link_speed; bool new_link, old_link; int status; #ifdef CONFIG_I40E_DCB int err; #endif /* CONFIG_I40E_DCB */ /* set this to force the get_link_status call to refresh state */ pf->hw.phy.get_link_info = true; old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); status = i40e_get_link_status(&pf->hw, &new_link); /* On success, disable temp link polling */ if (status == 0) { clear_bit(__I40E_TEMP_LINK_POLLING, pf->state); } else { /* Enable link polling temporarily until i40e_get_link_status * returns 0 */ set_bit(__I40E_TEMP_LINK_POLLING, pf->state); dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", status); return; } old_link_speed = pf->hw.phy.link_info_old.link_speed; new_link_speed = pf->hw.phy.link_info.link_speed; if (new_link == old_link && new_link_speed == old_link_speed && (test_bit(__I40E_VSI_DOWN, vsi->state) || new_link == netif_carrier_ok(vsi->netdev))) return; i40e_print_link_message(vsi, new_link); /* Notify the base of the switch tree connected to * the link. Floating VEBs are not notified. */ if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); else i40e_vsi_link_event(vsi, new_link); if (pf->vf) i40e_vc_notify_link_state(pf); if (pf->flags & I40E_FLAG_PTP) i40e_ptp_set_increment(pf); #ifdef CONFIG_I40E_DCB if (new_link == old_link) return; /* Not SW DCB so firmware will take care of default settings */ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) return; /* We cover here only link down, as after link up in case of SW DCB * SW LLDP agent will take care of setting it up */ if (!new_link) { dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n"); memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg)); err = i40e_dcb_sw_default_config(pf); if (err) { pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); } else { pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; pf->flags |= I40E_FLAG_DCB_CAPABLE; pf->flags &= ~I40E_FLAG_DCB_ENABLED; } } #endif /* CONFIG_I40E_DCB */ } /** * i40e_watchdog_subtask - periodic checks not using event driven response * @pf: board private structure **/ static void i40e_watchdog_subtask(struct i40e_pf *pf) { int i; /* if interface is down do nothing */ if (test_bit(__I40E_DOWN, pf->state) || test_bit(__I40E_CONFIG_BUSY, pf->state)) return; /* make sure we don't do these things too often */ if (time_before(jiffies, (pf->service_timer_previous + pf->service_timer_period))) return; pf->service_timer_previous = jiffies; if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) || test_bit(__I40E_TEMP_LINK_POLLING, pf->state)) i40e_link_event(pf); /* Update the stats for active netdevs so the network stack * can look at updated numbers whenever it cares to */ for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i] && pf->vsi[i]->netdev) i40e_update_stats(pf->vsi[i]); if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { /* Update the stats for the active switching components */ for (i = 0; i < I40E_MAX_VEB; i++) if (pf->veb[i]) i40e_update_veb_stats(pf->veb[i]); } i40e_ptp_rx_hang(pf); i40e_ptp_tx_hang(pf); } /** * i40e_reset_subtask - Set up for resetting the device and driver * @pf: board private 
structure **/ static void i40e_reset_subtask(struct i40e_pf *pf) { u32 reset_flags = 0; if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) { reset_flags |= BIT(__I40E_REINIT_REQUESTED); clear_bit(__I40E_REINIT_REQUESTED, pf->state); } if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) { reset_flags |= BIT(__I40E_PF_RESET_REQUESTED); clear_bit(__I40E_PF_RESET_REQUESTED, pf->state); } if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) { reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED); clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state); } if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) { reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); } if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) { reset_flags |= BIT(__I40E_DOWN_REQUESTED); clear_bit(__I40E_DOWN_REQUESTED, pf->state); } /* If there's a recovery already waiting, it takes * precedence before starting a new reset sequence. */ if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { i40e_prep_for_reset(pf); i40e_reset(pf); i40e_rebuild(pf, false, false); } /* If we're already down or resetting, just bail */ if (reset_flags && !test_bit(__I40E_DOWN, pf->state) && !test_bit(__I40E_CONFIG_BUSY, pf->state)) { i40e_do_reset(pf, reset_flags, false); } } /** * i40e_handle_link_event - Handle link event * @pf: board private structure * @e: event info posted on ARQ **/ static void i40e_handle_link_event(struct i40e_pf *pf, struct i40e_arq_event_info *e) { struct i40e_aqc_get_link_status *status = (struct i40e_aqc_get_link_status *)&e->desc.params.raw; /* Do a new status request to re-enable LSE reporting * and load new status information into the hw struct * This completely ignores any state information * in the ARQ event info, instead choosing to always * issue the AQ update link status command. */ i40e_link_event(pf); /* Check if module meets thermal requirements */ if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) { dev_err(&pf->pdev->dev, "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n"); dev_err(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); } else { /* check for unqualified module, if link is down, suppress * the message if link was forced to be down. 
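		 * (a forced-down link is the I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED
		 * case checked below)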
*/ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && (!(status->link_info & I40E_AQ_LINK_UP)) && (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) { dev_err(&pf->pdev->dev, "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n"); dev_err(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); } } } /** * i40e_clean_adminq_subtask - Clean the AdminQ rings * @pf: board private structure **/ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) { struct i40e_arq_event_info event; struct i40e_hw *hw = &pf->hw; u16 pending, i = 0; u16 opcode; u32 oldval; int ret; u32 val; /* Do not run clean AQ when PF reset fails */ if (test_bit(__I40E_RESET_FAILED, pf->state)) return; /* check for error indications */ val = rd32(&pf->hw, pf->hw.aq.arq.len); oldval = val; if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { if (hw->debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; } if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { if (hw->debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; pf->arq_overflows++; } if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { if (hw->debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; } if (oldval != val) wr32(&pf->hw, pf->hw.aq.arq.len, val); val = rd32(&pf->hw, pf->hw.aq.asq.len); oldval = val; if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { if (pf->hw.debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; } if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { if (pf->hw.debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; } if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) { if (pf->hw.debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; } if (oldval != val) wr32(&pf->hw, pf->hw.aq.asq.len, val); event.buf_len = I40E_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) return; do { ret = i40e_clean_arq_element(hw, &event, &pending); if (ret == -EALREADY) break; else if (ret) { dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); break; } opcode = le16_to_cpu(event.desc.opcode); switch (opcode) { case i40e_aqc_opc_get_link_status: rtnl_lock(); i40e_handle_link_event(pf, &event); rtnl_unlock(); break; case i40e_aqc_opc_send_msg_to_pf: ret = i40e_vc_process_vf_msg(pf, le16_to_cpu(event.desc.retval), le32_to_cpu(event.desc.cookie_high), le32_to_cpu(event.desc.cookie_low), event.msg_buf, event.msg_len); break; case i40e_aqc_opc_lldp_update_mib: dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); #ifdef CONFIG_I40E_DCB rtnl_lock(); i40e_handle_lldp_event(pf, &event); rtnl_unlock(); #endif /* CONFIG_I40E_DCB */ break; case i40e_aqc_opc_event_lan_overflow: dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); i40e_handle_lan_overflow_event(pf, &event); break; case i40e_aqc_opc_send_msg_to_peer: dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); break; case i40e_aqc_opc_nvm_erase: case i40e_aqc_opc_nvm_update: case i40e_aqc_opc_oem_post_update: i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation 0x%04x completed\n", opcode); break; default: dev_info(&pf->pdev->dev, "ARQ: Unknown event 0x%04x ignored\n", 
opcode); break; } } while (i++ < pf->adminq_work_limit); if (i < pf->adminq_work_limit) clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); /* re-enable Admin queue interrupt cause */ val = rd32(hw, I40E_PFINT_ICR0_ENA); val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, val); i40e_flush(hw); kfree(event.msg_buf); } /** * i40e_verify_eeprom - make sure eeprom is good to use * @pf: board private structure **/ static void i40e_verify_eeprom(struct i40e_pf *pf) { int err; err = i40e_diag_eeprom_test(&pf->hw); if (err) { /* retry in case of garbage read */ err = i40e_diag_eeprom_test(&pf->hw); if (err) { dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", err); set_bit(__I40E_BAD_EEPROM, pf->state); } } if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) { dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); clear_bit(__I40E_BAD_EEPROM, pf->state); } } /** * i40e_enable_pf_switch_lb * @pf: pointer to the PF structure * * enable switch loop back or die - no point in a return value **/ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; int ret; ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't get PF vsi config, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return; } ctxt.flags = I40E_AQ_VSI_TYPE_PF; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "update vsi switch failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } /** * i40e_disable_pf_switch_lb * @pf: pointer to the PF structure * * disable switch loop back or die - no point in a return value **/ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; int ret; ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't get PF vsi config, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return; } ctxt.flags = I40E_AQ_VSI_TYPE_PF; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "update vsi switch failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } /** * i40e_config_bridge_mode - Configure the HW bridge mode * @veb: pointer to the bridge instance * * Configure the loop back mode for the LAN VSI that is downlink to the * specified HW bridge instance. It is expected this function is called * when a new HW bridge is instantiated. **/ static void i40e_config_bridge_mode(struct i40e_veb *veb) { struct i40e_pf *pf = veb->pf; if (pf->hw.debug_mask & I40E_DEBUG_LAN) dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", veb->bridge_mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); if (veb->bridge_mode & BRIDGE_MODE_VEPA) i40e_disable_pf_switch_lb(pf); else i40e_enable_pf_switch_lb(pf); } /** * i40e_reconstitute_veb - rebuild the VEB and anything connected to it * @veb: pointer to the VEB instance * * This is a recursive function that first builds the attached VSIs then * recurses in to build the next layer of VEB. We track the connections * through our own index numbers because the seid's from the HW could * change across the reset. **/ static int i40e_reconstitute_veb(struct i40e_veb *veb) { struct i40e_vsi *ctl_vsi = NULL; struct i40e_pf *pf = veb->pf; int v, veb_idx; int ret; /* build VSI that owns this VEB, temporarily attached to base VEB */ for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { if (pf->vsi[v] && pf->vsi[v]->veb_idx == veb->idx && pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { ctl_vsi = pf->vsi[v]; break; } } if (!ctl_vsi) { dev_info(&pf->pdev->dev, "missing owner VSI for veb_idx %d\n", veb->idx); ret = -ENOENT; goto end_reconstitute; } if (ctl_vsi != pf->vsi[pf->lan_vsi]) ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; ret = i40e_add_vsi(ctl_vsi); if (ret) { dev_info(&pf->pdev->dev, "rebuild of veb_idx %d owner VSI failed: %d\n", veb->idx, ret); goto end_reconstitute; } i40e_vsi_reset_stats(ctl_vsi); /* create the VEB in the switch and move the VSI onto the VEB */ ret = i40e_add_veb(veb, ctl_vsi); if (ret) goto end_reconstitute; if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) veb->bridge_mode = BRIDGE_MODE_VEB; else veb->bridge_mode = BRIDGE_MODE_VEPA; i40e_config_bridge_mode(veb); /* create the remaining VSIs attached to this VEB */ for (v = 0; v < pf->num_alloc_vsi; v++) { if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) continue; if (pf->vsi[v]->veb_idx == veb->idx) { struct i40e_vsi *vsi = pf->vsi[v]; vsi->uplink_seid = veb->seid; ret = i40e_add_vsi(vsi); if (ret) { dev_info(&pf->pdev->dev, "rebuild of vsi_idx %d failed: %d\n", v, ret); goto end_reconstitute; } i40e_vsi_reset_stats(vsi); } } /* create any VEBs attached to this VEB - RECURSION */ for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { pf->veb[veb_idx]->uplink_seid = veb->seid; ret = i40e_reconstitute_veb(pf->veb[veb_idx]); if (ret) break; } } end_reconstitute: return ret; } /** * i40e_get_capabilities - get info about the HW * @pf: the PF struct * @list_type: AQ capability to be queried **/ static int i40e_get_capabilities(struct i40e_pf *pf, enum i40e_admin_queue_opc list_type) { struct i40e_aqc_list_capabilities_element_resp *cap_buf; u16 data_size; int buf_len; int err; buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); do { cap_buf = kzalloc(buf_len, GFP_KERNEL); if (!cap_buf) return -ENOMEM; /* this loads the data into the hw struct for us */ err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, &data_size, list_type, NULL); /* data loaded, buffer no longer needed */ kfree(cap_buf); if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { /* retry with a larger buffer */ buf_len = data_size; } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) { dev_info(&pf->pdev->dev, "capability discovery failed, err %pe aq_err %s\n", ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -ENODEV; } } while (err); if (pf->hw.debug_mask & I40E_DEBUG_USER) { if (list_type == i40e_aqc_opc_list_func_capabilities) { dev_info(&pf->pdev->dev, "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", pf->hw.pf_id, 
pf->hw.func_caps.num_vfs, pf->hw.func_caps.num_msix_vectors, pf->hw.func_caps.num_msix_vectors_vf, pf->hw.func_caps.fd_filters_guaranteed, pf->hw.func_caps.fd_filters_best_effort, pf->hw.func_caps.num_tx_qp, pf->hw.func_caps.num_vsis); } else if (list_type == i40e_aqc_opc_list_dev_capabilities) { dev_info(&pf->pdev->dev, "switch_mode=0x%04x, function_valid=0x%08x\n", pf->hw.dev_caps.switch_mode, pf->hw.dev_caps.valid_functions); dev_info(&pf->pdev->dev, "SR-IOV=%d, num_vfs for all function=%u\n", pf->hw.dev_caps.sr_iov_1_1, pf->hw.dev_caps.num_vfs); dev_info(&pf->pdev->dev, "num_vsis=%u, num_rx:%u, num_tx=%u\n", pf->hw.dev_caps.num_vsis, pf->hw.dev_caps.num_rx_qp, pf->hw.dev_caps.num_tx_qp); } } if (list_type == i40e_aqc_opc_list_func_capabilities) { #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ + pf->hw.func_caps.num_vfs) if (pf->hw.revision_id == 0 && pf->hw.func_caps.num_vsis < DEF_NUM_VSI) { dev_info(&pf->pdev->dev, "got num_vsis %d, setting num_vsis to %d\n", pf->hw.func_caps.num_vsis, DEF_NUM_VSI); pf->hw.func_caps.num_vsis = DEF_NUM_VSI; } } return 0; } static int i40e_vsi_clear(struct i40e_vsi *vsi); /** * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband * @pf: board private structure **/ static void i40e_fdir_sb_setup(struct i40e_pf *pf) { struct i40e_vsi *vsi; /* quick workaround for an NVM issue that leaves a critical register * uninitialized */ if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { static const u32 hkey[] = { 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36, 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb, 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21, 0x95b3a76d}; int i; for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++) wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); } if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) return; /* find existing VSI and see if it needs configuring */ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); /* create a new VSI if none exists */ if (!vsi) { vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->vsi[pf->lan_vsi]->seid, 0); if (!vsi) { dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; pf->flags |= I40E_FLAG_FD_SB_INACTIVE; return; } } i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); } /** * i40e_fdir_teardown - release the Flow Director resources * @pf: board private structure **/ static void i40e_fdir_teardown(struct i40e_pf *pf) { struct i40e_vsi *vsi; i40e_fdir_filter_exit(pf); vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); if (vsi) i40e_vsi_release(vsi); } /** * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs * @vsi: PF main vsi * @seid: seid of main or channel VSIs * * Rebuilds cloud filters associated with main VSI and channel VSIs if they * existed before reset **/ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid) { struct i40e_cloud_filter *cfilter; struct i40e_pf *pf = vsi->back; struct hlist_node *node; int ret; /* Add cloud filters back if they exist */ hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, cloud_node) { if (cfilter->seid != seid) continue; if (cfilter->dst_port) ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); else ret = i40e_add_del_cloud_filter(vsi, cfilter, true); if (ret) { dev_dbg(&pf->pdev->dev, "Failed to rebuild cloud filter, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; } } return 0; } /** * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset * @vsi: PF main vsi * * Rebuilds channel VSIs if they existed before 
reset **/ static int i40e_rebuild_channels(struct i40e_vsi *vsi) { struct i40e_channel *ch, *ch_tmp; int ret; if (list_empty(&vsi->ch_list)) return 0; list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { if (!ch->initialized) break; /* Proceed with creation of channel (VMDq2) VSI */ ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch); if (ret) { dev_info(&vsi->back->pdev->dev, "failed to rebuild channels using uplink_seid %u\n", vsi->uplink_seid); return ret; } /* Reconfigure TX queues using QTX_CTL register */ ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch); if (ret) { dev_info(&vsi->back->pdev->dev, "failed to configure TX rings for channel %u\n", ch->seid); return ret; } /* update 'next_base_queue' */ vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs; if (ch->max_tx_rate) { u64 credits = ch->max_tx_rate; if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate)) return -EINVAL; do_div(credits, I40E_BW_CREDIT_DIVISOR); dev_dbg(&vsi->back->pdev->dev, "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", ch->max_tx_rate, credits, ch->seid); } ret = i40e_rebuild_cloud_filters(vsi, ch->seid); if (ret) { dev_dbg(&vsi->back->pdev->dev, "Failed to rebuild cloud filters for channel VSI %u\n", ch->seid); return ret; } } return 0; } /** * i40e_clean_xps_state - clean xps state for every tx_ring * @vsi: ptr to the VSI **/ static void i40e_clean_xps_state(struct i40e_vsi *vsi) { int i; if (vsi->tx_rings) for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->tx_rings[i]) clear_bit(__I40E_TX_XPS_INIT_DONE, vsi->tx_rings[i]->state); } /** * i40e_prep_for_reset - prep for the core to reset * @pf: board private structure * * Close up the VFs and other things in prep for PF Reset. **/ static void i40e_prep_for_reset(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; int ret = 0; u32 v; clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state); if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) return; if (i40e_check_asq_alive(&pf->hw)) i40e_vc_notify_reset(pf); dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); /* quiesce the VSIs and their queues that are not already DOWN */ i40e_pf_quiesce_all_vsi(pf); for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v]) { i40e_clean_xps_state(pf->vsi[v]); pf->vsi[v]->seid = 0; } } i40e_shutdown_adminq(&pf->hw); /* call shutdown HMC */ if (hw->hmc.hmc_obj) { ret = i40e_shutdown_lan_hmc(hw); if (ret) dev_warn(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret); } /* Save the current PTP time so that we can restore the time after the * reset completes. 
*/ i40e_ptp_save_hw_time(pf); } /** * i40e_send_version - update firmware with driver version * @pf: PF struct */ static void i40e_send_version(struct i40e_pf *pf) { struct i40e_driver_version dv; dv.major_version = 0xff; dv.minor_version = 0xff; dv.build_version = 0xff; dv.subbuild_version = 0; strscpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string)); i40e_aq_send_driver_version(&pf->hw, &dv, NULL); } /** * i40e_get_oem_version - get OEM specific version information * @hw: pointer to the hardware structure **/ static void i40e_get_oem_version(struct i40e_hw *hw) { u16 block_offset = 0xffff; u16 block_length = 0; u16 capabilities = 0; u16 gen_snap = 0; u16 release = 0; #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B #define I40E_NVM_OEM_LENGTH_OFFSET 0x00 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01 #define I40E_NVM_OEM_GEN_OFFSET 0x02 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F #define I40E_NVM_OEM_LENGTH 3 /* Check if pointer to OEM version block is valid. */ i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset); if (block_offset == 0xffff) return; /* Check if OEM version block has correct length. */ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET, &block_length); if (block_length < I40E_NVM_OEM_LENGTH) return; /* Check if OEM version format is as expected. */ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET, &capabilities); if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0) return; i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET, &gen_snap); i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET, &release); hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release; hw->nvm.eetrack = I40E_OEM_EETRACK_ID; } /** * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen * @pf: board private structure **/ static int i40e_reset(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; int ret; ret = i40e_pf_reset(hw); if (ret) { dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); set_bit(__I40E_RESET_FAILED, pf->state); clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); } else { pf->pfr_count++; } return ret; } /** * i40e_rebuild - rebuild using a saved config * @pf: board private structure * @reinit: if the Main VSI needs to re-initialized. * @lock_acquired: indicates whether or not the lock has been acquired * before this function was called. **/ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) { const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf); struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; int ret; u32 val; int v; if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && is_recovery_mode_reported) i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); if (test_bit(__I40E_DOWN, pf->state) && !test_bit(__I40E_RECOVERY_MODE, pf->state)) goto clear_recovery; dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ ret = i40e_init_adminq(&pf->hw); if (ret) { dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto clear_recovery; } i40e_get_oem_version(&pf->hw); if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) { /* The following delay is necessary for firmware update. 
*/ mdelay(1000); } /* re-verify the eeprom if we just had an EMP reset */ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) i40e_verify_eeprom(pf); /* if we are going out of or into recovery mode we have to act * accordingly with regard to resources initialization * and deinitialization */ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { if (i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities)) goto end_unlock; if (is_recovery_mode_reported) { /* we're staying in recovery mode so we'll reinitialize * misc vector here */ if (i40e_setup_misc_vector_for_recovery_mode(pf)) goto end_unlock; } else { if (!lock_acquired) rtnl_lock(); /* we're going out of recovery mode so we'll free * the IRQ allocated specifically for recovery mode * and restore the interrupt scheme */ free_irq(pf->pdev->irq, pf); i40e_clear_interrupt_scheme(pf); if (i40e_restore_interrupt_scheme(pf)) goto end_unlock; } /* tell the firmware that we're starting */ i40e_send_version(pf); /* bail out in case recovery mode was detected, as there is * no need for further configuration. */ goto end_unlock; } i40e_clear_pxe_mode(hw); ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities); if (ret) goto end_core_reset; ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, 0, 0); if (ret) { dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret); goto end_core_reset; } ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (ret) { dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret); goto end_core_reset; } #ifdef CONFIG_I40E_DCB /* Enable FW to write a default DCB config on link-up * unless I40E_FLAG_TC_MQPRIO was enabled or DCB * is not supported with new link speed */ if (i40e_is_tc_mqprio_enabled(pf)) { i40e_aq_set_dcb_parameters(hw, false, NULL); } else { if (I40E_IS_X710TL_DEVICE(hw->device_id) && (hw->phy.link_info.link_speed & (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) { i40e_aq_set_dcb_parameters(hw, false, NULL); dev_warn(&pf->pdev->dev, "DCB is not supported for X710-T*L 2.5/5G speeds\n"); pf->flags &= ~I40E_FLAG_DCB_CAPABLE; } else { i40e_aq_set_dcb_parameters(hw, true, NULL); ret = i40e_init_pf_dcb(pf); if (ret) { dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret); pf->flags &= ~I40E_FLAG_DCB_CAPABLE; /* Continue without DCB enabled */ } } } #endif /* CONFIG_I40E_DCB */ if (!lock_acquired) rtnl_lock(); ret = i40e_setup_pf_switch(pf, reinit, true); if (ret) goto end_unlock; /* The driver only wants link up/down and module qualification * reports from firmware. Note the negative logic. */ ret = i40e_aq_set_phy_int_mask(&pf->hw, ~(I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MEDIA_NA | I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (ret) dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Rebuild the VSIs and VEBs that existed before reset. * They are still in our local switch element arrays, so only * need to rebuild the switch model in the HW. * * If there were VEBs but the reconstitution failed, we'll try * to recover minimal use by getting the basic PF VSI working. 
*/ if (vsi->uplink_seid != pf->mac_seid) { dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); /* find the one VEB connected to the MAC, and find orphans */ for (v = 0; v < I40E_MAX_VEB; v++) { if (!pf->veb[v]) continue; if (pf->veb[v]->uplink_seid == pf->mac_seid || pf->veb[v]->uplink_seid == 0) { ret = i40e_reconstitute_veb(pf->veb[v]); if (!ret) continue; /* If Main VEB failed, we're in deep doodoo, * so give up rebuilding the switch and set up * for minimal rebuild of PF VSI. * If orphan failed, we'll report the error * but try to keep going. */ if (pf->veb[v]->uplink_seid == pf->mac_seid) { dev_info(&pf->pdev->dev, "rebuild of switch failed: %d, will try to set up simple PF connection\n", ret); vsi->uplink_seid = pf->mac_seid; break; } else if (pf->veb[v]->uplink_seid == 0) { dev_info(&pf->pdev->dev, "rebuild of orphan VEB failed: %d\n", ret); } } } } if (vsi->uplink_seid == pf->mac_seid) { dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); /* no VEB, so rebuild only the Main VSI */ ret = i40e_add_vsi(vsi); if (ret) { dev_info(&pf->pdev->dev, "rebuild of Main VSI failed: %d\n", ret); goto end_unlock; } } if (vsi->mqprio_qopt.max_rate[0]) { u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, vsi->mqprio_qopt.max_rate[0]); u64 credits = 0; ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (ret) goto end_unlock; credits = max_tx_rate; do_div(credits, I40E_BW_CREDIT_DIVISOR); dev_dbg(&vsi->back->pdev->dev, "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", max_tx_rate, credits, vsi->seid); } ret = i40e_rebuild_cloud_filters(vsi, vsi->seid); if (ret) goto end_unlock; /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs * for this main VSI if they exist */ ret = i40e_rebuild_channels(vsi); if (ret) goto end_unlock; /* Reconfigure hardware for allowing smaller MSS in the case * of TSO, so that we avoid the MDD being fired and causing * a reset in the case of small MSS+TSO. */ #define I40E_REG_MSS 0x000E64DC #define I40E_REG_MSS_MIN_MASK 0x3FF0000 #define I40E_64BYTE_MSS 0x400000 val = rd32(hw, I40E_REG_MSS); if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { val &= ~I40E_REG_MSS_MIN_MASK; val |= I40E_64BYTE_MSS; wr32(hw, I40E_REG_MSS, val); } if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { msleep(75); ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (ret) dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } /* reinit the misc interrupt */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) { ret = i40e_setup_misc_vector(pf); if (ret) goto end_unlock; } /* Add a filter to drop all Flow control frames from any VSI from being * transmitted. By doing so we stop a malicious VF from sending out * PAUSE or PFC frames and potentially controlling traffic for other * PF/VF VSIs. * The FW can still send Flow control frames if enabled. */ i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, pf->main_vsi_seid); /* restart the VSIs that were rebuilt and running before the reset */ i40e_pf_unquiesce_all_vsi(pf); /* Release the RTNL lock before we start resetting VFs */ if (!lock_acquired) rtnl_unlock(); /* Restore promiscuous settings */ ret = i40e_set_promiscuous(pf, pf->cur_promisc); if (ret) dev_warn(&pf->pdev->dev, "Failed to restore promiscuous setting: %s, err %pe aq_err %s\n", pf->cur_promisc ? 
"on" : "off", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); i40e_reset_all_vfs(pf, true); /* tell the firmware that we're starting */ i40e_send_version(pf); /* We've already released the lock, so don't do it again */ goto end_core_reset; end_unlock: if (!lock_acquired) rtnl_unlock(); end_core_reset: clear_bit(__I40E_RESET_FAILED, pf->state); clear_recovery: clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state); } /** * i40e_reset_and_rebuild - reset and rebuild using a saved config * @pf: board private structure * @reinit: if the Main VSI needs to re-initialized. * @lock_acquired: indicates whether or not the lock has been acquired * before this function was called. **/ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) { int ret; if (test_bit(__I40E_IN_REMOVE, pf->state)) return; /* Now we wait for GRST to settle out. * We don't have to delete the VEBs or VSIs from the hw switch * because the reset will make them disappear. */ ret = i40e_reset(pf); if (!ret) i40e_rebuild(pf, reinit, lock_acquired); } /** * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild * @pf: board private structure * * Close up the VFs and other things in prep for a Core Reset, * then get ready to rebuild the world. * @lock_acquired: indicates whether or not the lock has been acquired * before this function was called. **/ static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired) { i40e_prep_for_reset(pf); i40e_reset_and_rebuild(pf, false, lock_acquired); } /** * i40e_handle_mdd_event * @pf: pointer to the PF structure * * Called from the MDD irq handler to identify possibly malicious vfs **/ static void i40e_handle_mdd_event(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; bool mdd_detected = false; struct i40e_vf *vf; u32 reg; int i; if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state)) return; /* find what triggered the MDD event */ reg = rd32(hw, I40E_GL_MDET_TX); if (reg & I40E_GL_MDET_TX_VALID_MASK) { u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> I40E_GL_MDET_TX_PF_NUM_SHIFT; u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> I40E_GL_MDET_TX_VF_NUM_SHIFT; u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> I40E_GL_MDET_TX_EVENT_SHIFT; u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT) - pf->hw.func_caps.base_queue; if (netif_msg_tx_err(pf)) dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", event, queue, pf_num, vf_num); wr32(hw, I40E_GL_MDET_TX, 0xffffffff); mdd_detected = true; } reg = rd32(hw, I40E_GL_MDET_RX); if (reg & I40E_GL_MDET_RX_VALID_MASK) { u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> I40E_GL_MDET_RX_FUNCTION_SHIFT; u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> I40E_GL_MDET_RX_EVENT_SHIFT; u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> I40E_GL_MDET_RX_QUEUE_SHIFT) - pf->hw.func_caps.base_queue; if (netif_msg_rx_err(pf)) dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", event, queue, func); wr32(hw, I40E_GL_MDET_RX, 0xffffffff); mdd_detected = true; } if (mdd_detected) { reg = rd32(hw, I40E_PF_MDET_TX); if (reg & I40E_PF_MDET_TX_VALID_MASK) { wr32(hw, I40E_PF_MDET_TX, 0xFFFF); dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n"); } reg = rd32(hw, I40E_PF_MDET_RX); if (reg & I40E_PF_MDET_RX_VALID_MASK) { wr32(hw, I40E_PF_MDET_RX, 0xFFFF); dev_dbg(&pf->pdev->dev, "RX driver issue 
detected on PF\n"); } } /* see if one of the VFs needs its hand slapped */ for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { vf = &(pf->vf[i]); reg = rd32(hw, I40E_VP_MDET_TX(i)); if (reg & I40E_VP_MDET_TX_VALID_MASK) { wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); vf->num_mdd_events++; dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", i); dev_info(&pf->pdev->dev, "Use PF Control I/F to re-enable the VF\n"); set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); } reg = rd32(hw, I40E_VP_MDET_RX(i)); if (reg & I40E_VP_MDET_RX_VALID_MASK) { wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); vf->num_mdd_events++; dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", i); dev_info(&pf->pdev->dev, "Use PF Control I/F to re-enable the VF\n"); set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); } } /* re-enable mdd interrupt cause */ clear_bit(__I40E_MDD_EVENT_PENDING, pf->state); reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); i40e_flush(hw); } /** * i40e_service_task - Run the driver's async subtasks * @work: pointer to work_struct containing our data **/ static void i40e_service_task(struct work_struct *work) { struct i40e_pf *pf = container_of(work, struct i40e_pf, service_task); unsigned long start_time = jiffies; /* don't bother with service tasks if a reset is in progress */ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || test_bit(__I40E_SUSPENDED, pf->state)) return; if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) return; if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) { i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]); i40e_sync_filters_subtask(pf); i40e_reset_subtask(pf); i40e_handle_mdd_event(pf); i40e_vc_process_vflr_event(pf); i40e_watchdog_subtask(pf); i40e_fdir_reinit_subtask(pf); if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) { /* Client subtask will reopen next time through. */ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true); } else { i40e_client_subtask(pf); if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE, pf->state)) i40e_notify_client_of_l2_param_changes( pf->vsi[pf->lan_vsi]); } i40e_sync_filters_subtask(pf); } else { i40e_reset_subtask(pf); } i40e_clean_adminq_subtask(pf); /* flush memory to make sure state is correct before next watchdog */ smp_mb__before_atomic(); clear_bit(__I40E_SERVICE_SCHED, pf->state); /* If the tasks have taken longer than one timer cycle or there * is more work to be done, reschedule the service task now * rather than wait for the timer to tick again. 
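 *
 * (Editorial note, not part of the driver: time_after() is the wrap-safe
 *  jiffies comparison, roughly
 *
 *      time_after(a, b) == ((long)((b) - (a)) < 0)
 *
 *  so the "took longer than one timer cycle" test below stays correct
 *  even if the jiffies counter wraps between start_time and now.)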
*/ if (time_after(jiffies, (start_time + pf->service_timer_period)) || test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) || test_bit(__I40E_MDD_EVENT_PENDING, pf->state) || test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) i40e_service_event_schedule(pf); } /** * i40e_service_timer - timer callback * @t: timer list pointer **/ static void i40e_service_timer(struct timer_list *t) { struct i40e_pf *pf = from_timer(pf, t, service_timer); mod_timer(&pf->service_timer, round_jiffies(jiffies + pf->service_timer_period)); i40e_service_event_schedule(pf); } /** * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI * @vsi: the VSI being configured **/ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; switch (vsi->type) { case I40E_VSI_MAIN: vsi->alloc_queue_pairs = pf->num_lan_qps; if (!vsi->num_tx_desc) vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); if (!vsi->num_rx_desc) vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); if (pf->flags & I40E_FLAG_MSIX_ENABLED) vsi->num_q_vectors = pf->num_lan_msix; else vsi->num_q_vectors = 1; break; case I40E_VSI_FDIR: vsi->alloc_queue_pairs = 1; vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT, I40E_REQ_DESCRIPTOR_MULTIPLE); vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT, I40E_REQ_DESCRIPTOR_MULTIPLE); vsi->num_q_vectors = pf->num_fdsb_msix; break; case I40E_VSI_VMDQ2: vsi->alloc_queue_pairs = pf->num_vmdq_qps; if (!vsi->num_tx_desc) vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); if (!vsi->num_rx_desc) vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); vsi->num_q_vectors = pf->num_vmdq_msix; break; case I40E_VSI_SRIOV: vsi->alloc_queue_pairs = pf->num_vf_qps; if (!vsi->num_tx_desc) vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); if (!vsi->num_rx_desc) vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); break; default: WARN_ON(1); return -ENODATA; } if (is_kdump_kernel()) { vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS; vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS; } return 0; } /** * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi * @vsi: VSI pointer * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. * * On error: returns error code (negative) * On success: returns 0 **/ static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) { struct i40e_ring **next_rings; int size; int ret = 0; /* allocate memory for both Tx, XDP Tx and Rx ring pointers */ size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 
3 : 2); vsi->tx_rings = kzalloc(size, GFP_KERNEL); if (!vsi->tx_rings) return -ENOMEM; next_rings = vsi->tx_rings + vsi->alloc_queue_pairs; if (i40e_enabled_xdp_vsi(vsi)) { vsi->xdp_rings = next_rings; next_rings += vsi->alloc_queue_pairs; } vsi->rx_rings = next_rings; if (alloc_qvectors) { /* allocate memory for q_vector pointers */ size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors; vsi->q_vectors = kzalloc(size, GFP_KERNEL); if (!vsi->q_vectors) { ret = -ENOMEM; goto err_vectors; } } return ret; err_vectors: kfree(vsi->tx_rings); return ret; } /** * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF * @pf: board private structure * @type: type of VSI * * On error: returns error code (negative) * On success: returns vsi index in PF (positive) **/ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) { int ret = -ENODEV; struct i40e_vsi *vsi; int vsi_idx; int i; /* Need to protect the allocation of the VSIs at the PF level */ mutex_lock(&pf->switch_mutex); /* VSI list may be fragmented if VSI creation/destruction has * been happening. We can afford to do a quick scan to look * for any free VSIs in the list. * * find next empty vsi slot, looping back around if necessary */ i = pf->next_vsi; while (i < pf->num_alloc_vsi && pf->vsi[i]) i++; if (i >= pf->num_alloc_vsi) { i = 0; while (i < pf->next_vsi && pf->vsi[i]) i++; } if (i < pf->num_alloc_vsi && !pf->vsi[i]) { vsi_idx = i; /* Found one! */ } else { ret = -ENODEV; goto unlock_pf; /* out of VSI slots! */ } pf->next_vsi = ++i; vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); if (!vsi) { ret = -ENOMEM; goto unlock_pf; } vsi->type = type; vsi->back = pf; set_bit(__I40E_VSI_DOWN, vsi->state); vsi->flags = 0; vsi->idx = vsi_idx; vsi->int_rate_limit = 0; vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? pf->rss_table_size : 64; vsi->netdev_registered = false; vsi->work_limit = I40E_DEFAULT_IRQ_WORK; hash_init(vsi->mac_filter_hash); vsi->irqs_ready = false; if (type == I40E_VSI_MAIN) { vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL); if (!vsi->af_xdp_zc_qps) goto err_rings; } ret = i40e_set_num_rings_in_vsi(vsi); if (ret) goto err_rings; ret = i40e_vsi_alloc_arrays(vsi, true); if (ret) goto err_rings; /* Setup default MSIX irq handler for VSI */ i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); /* Initialize VSI lock */ spin_lock_init(&vsi->mac_filter_hash_lock); pf->vsi[vsi_idx] = vsi; ret = vsi_idx; goto unlock_pf; err_rings: bitmap_free(vsi->af_xdp_zc_qps); pf->next_vsi = i - 1; kfree(vsi); unlock_pf: mutex_unlock(&pf->switch_mutex); return ret; } /** * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI * @vsi: VSI pointer * @free_qvectors: a bool to specify if q_vectors need to be freed. 
* * On error: returns error code (negative) * On success: returns 0 **/ static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) { /* free the ring and vector containers */ if (free_qvectors) { kfree(vsi->q_vectors); vsi->q_vectors = NULL; } kfree(vsi->tx_rings); vsi->tx_rings = NULL; vsi->rx_rings = NULL; vsi->xdp_rings = NULL; } /** * i40e_clear_rss_config_user - clear the user configured RSS hash keys * and lookup table * @vsi: Pointer to VSI structure */ static void i40e_clear_rss_config_user(struct i40e_vsi *vsi) { if (!vsi) return; kfree(vsi->rss_hkey_user); vsi->rss_hkey_user = NULL; kfree(vsi->rss_lut_user); vsi->rss_lut_user = NULL; } /** * i40e_vsi_clear - Deallocate the VSI provided * @vsi: the VSI being un-configured **/ static int i40e_vsi_clear(struct i40e_vsi *vsi) { struct i40e_pf *pf; if (!vsi) return 0; if (!vsi->back) goto free_vsi; pf = vsi->back; mutex_lock(&pf->switch_mutex); if (!pf->vsi[vsi->idx]) { dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n", vsi->idx, vsi->idx, vsi->type); goto unlock_vsi; } if (pf->vsi[vsi->idx] != vsi) { dev_err(&pf->pdev->dev, "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n", pf->vsi[vsi->idx]->idx, pf->vsi[vsi->idx]->type, vsi->idx, vsi->type); goto unlock_vsi; } /* updates the PF for this cleared vsi */ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); bitmap_free(vsi->af_xdp_zc_qps); i40e_vsi_free_arrays(vsi, true); i40e_clear_rss_config_user(vsi); pf->vsi[vsi->idx] = NULL; if (vsi->idx < pf->next_vsi) pf->next_vsi = vsi->idx; unlock_vsi: mutex_unlock(&pf->switch_mutex); free_vsi: kfree(vsi); return 0; } /** * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI * @vsi: the VSI being cleaned **/ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) { int i; if (vsi->tx_rings && vsi->tx_rings[0]) { for (i = 0; i < vsi->alloc_queue_pairs; i++) { kfree_rcu(vsi->tx_rings[i], rcu); WRITE_ONCE(vsi->tx_rings[i], NULL); WRITE_ONCE(vsi->rx_rings[i], NULL); if (vsi->xdp_rings) WRITE_ONCE(vsi->xdp_rings[i], NULL); } } } /** * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI * @vsi: the VSI being configured **/ static int i40e_alloc_rings(struct i40e_vsi *vsi) { int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 
3 : 2; struct i40e_pf *pf = vsi->back; struct i40e_ring *ring; /* Set basic values in the rings to be used later during open() */ for (i = 0; i < vsi->alloc_queue_pairs; i++) { /* allocate space for both Tx and Rx in one shot */ ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL); if (!ring) goto err_out; ring->queue_index = i; ring->reg_idx = vsi->base_queue + i; ring->ring_active = false; ring->vsi = vsi; ring->netdev = vsi->netdev; ring->dev = &pf->pdev->dev; ring->count = vsi->num_tx_desc; ring->size = 0; ring->dcb_tc = 0; if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; ring->itr_setting = pf->tx_itr_default; WRITE_ONCE(vsi->tx_rings[i], ring++); if (!i40e_enabled_xdp_vsi(vsi)) goto setup_rx; ring->queue_index = vsi->alloc_queue_pairs + i; ring->reg_idx = vsi->base_queue + ring->queue_index; ring->ring_active = false; ring->vsi = vsi; ring->netdev = NULL; ring->dev = &pf->pdev->dev; ring->count = vsi->num_tx_desc; ring->size = 0; ring->dcb_tc = 0; if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; set_ring_xdp(ring); ring->itr_setting = pf->tx_itr_default; WRITE_ONCE(vsi->xdp_rings[i], ring++); setup_rx: ring->queue_index = i; ring->reg_idx = vsi->base_queue + i; ring->ring_active = false; ring->vsi = vsi; ring->netdev = vsi->netdev; ring->dev = &pf->pdev->dev; ring->count = vsi->num_rx_desc; ring->size = 0; ring->dcb_tc = 0; ring->itr_setting = pf->rx_itr_default; WRITE_ONCE(vsi->rx_rings[i], ring); } return 0; err_out: i40e_vsi_clear_rings(vsi); return -ENOMEM; } /** * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel * @pf: board private structure * @vectors: the number of MSI-X vectors to request * * Returns the number of vectors reserved, or error **/ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) { vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, I40E_MIN_MSIX, vectors); if (vectors < 0) { dev_info(&pf->pdev->dev, "MSI-X vector reservation failed: %d\n", vectors); vectors = 0; } return vectors; } /** * i40e_init_msix - Setup the MSIX capability * @pf: board private structure * * Work with the OS to set up the MSIX vectors needed. * * Returns the number of vectors reserved or negative on failure **/ static int i40e_init_msix(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; int cpus, extra_vectors; int vectors_left; int v_budget, i; int v_actual; int iwarp_requested = 0; if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) return -ENODEV; /* The number of vectors we'll request will be comprised of: * - Add 1 for "other" cause for Admin Queue events, etc. * - The number of LAN queue pairs * - Queues being used for RSS. * We don't need as many as max_rss_size vectors. * use rss_size instead in the calculation since that * is governed by number of cpus in the system. * - assumes symmetric Tx/Rx pairing * - The number of VMDq pairs * - The CPU count within the NUMA node if iWARP is enabled * Once we count this up, try the request. * * If we can't get what we want, we'll simplify to nearly nothing * and try again. If that still fails, we punt. */ vectors_left = hw->func_caps.num_msix_vectors; v_budget = 0; /* reserve one vector for miscellaneous handler */ if (vectors_left) { v_budget++; vectors_left--; } /* reserve some vectors for the main PF traffic queues. Initially we * only reserve at most 50% of the available vectors, in the case that * the number of online CPUs is large. This ensures that we can enable * extra features as well. 
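 *
 * (Illustration with hypothetical numbers, not taken from real hardware:
 *  with num_msix_vectors = 64 and 16 online CPUs, 1 vector goes to the
 *  misc handler (63 left) and the LAN VSI initially gets
 *  min(16, 63 / 2) = 16, leaving 47 for flow director, iWARP and VMDq
 *  before the leftover vectors are handed back to LAN further down.)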
Once we've enabled the other features, we * will use any remaining vectors to reach as close as we can to the * number of online CPUs. */ cpus = num_online_cpus(); pf->num_lan_msix = min_t(int, cpus, vectors_left / 2); vectors_left -= pf->num_lan_msix; /* reserve one vector for sideband flow director */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { if (vectors_left) { pf->num_fdsb_msix = 1; v_budget++; vectors_left--; } else { pf->num_fdsb_msix = 0; } } /* can we reserve enough for iWARP? */ if (pf->flags & I40E_FLAG_IWARP_ENABLED) { iwarp_requested = pf->num_iwarp_msix; if (!vectors_left) pf->num_iwarp_msix = 0; else if (vectors_left < pf->num_iwarp_msix) pf->num_iwarp_msix = 1; v_budget += pf->num_iwarp_msix; vectors_left -= pf->num_iwarp_msix; } /* any vectors left over go for VMDq support */ if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { if (!vectors_left) { pf->num_vmdq_msix = 0; pf->num_vmdq_qps = 0; } else { int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); /* if we're short on vectors for what's desired, we limit * the queues per vmdq. If this is still more than are * available, the user will need to change the number of * queues/vectors used by the PF later with the ethtool * channels command */ if (vectors_left < vmdq_vecs_wanted) { pf->num_vmdq_qps = 1; vmdq_vecs_wanted = pf->num_vmdq_vsis; vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); } pf->num_vmdq_msix = pf->num_vmdq_qps; v_budget += vmdq_vecs; vectors_left -= vmdq_vecs; } } /* On systems with a large number of SMP cores, we previously limited * the number of vectors for num_lan_msix to be at most 50% of the * available vectors, to allow for other features. Now, we add back * the remaining vectors. However, we ensure that the total * num_lan_msix will not exceed num_online_cpus(). To do this, we * calculate the number of vectors we can add without going over the * cap of CPUs. For systems with a small number of CPUs this will be * zero. */ extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left); pf->num_lan_msix += extra_vectors; vectors_left -= extra_vectors; WARN(vectors_left < 0, "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n"); v_budget += pf->num_lan_msix; pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); if (!pf->msix_entries) return -ENOMEM; for (i = 0; i < v_budget; i++) pf->msix_entries[i].entry = i; v_actual = i40e_reserve_msix_vectors(pf, v_budget); if (v_actual < I40E_MIN_MSIX) { pf->flags &= ~I40E_FLAG_MSIX_ENABLED; kfree(pf->msix_entries); pf->msix_entries = NULL; pci_disable_msix(pf->pdev); return -ENODEV; } else if (v_actual == I40E_MIN_MSIX) { /* Adjust for minimal MSIX use */ pf->num_vmdq_vsis = 0; pf->num_vmdq_qps = 0; pf->num_lan_qps = 1; pf->num_lan_msix = 1; } else if (v_actual != v_budget) { /* If we have limited resources, we will start with no vectors * for the special features and then allocate vectors to some * of these features based on the policy and at the end disable * the features that did not get any vectors. 
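 *
 * (Hypothetical example of the policy below: if only 3 vectors are
 *  granted, one is kept for misc so vec = 2 and the LAN VSI gets a
 *  single vector; with 4 granted and iWARP enabled, vec = 3 splits
 *  into 1 LAN + 1 iWARP, otherwise LAN gets 2.)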
*/ int vec; dev_info(&pf->pdev->dev, "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n", v_actual, v_budget); /* reserve the misc vector */ vec = v_actual - 1; /* Scale vector usage down */ pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ pf->num_vmdq_vsis = 1; pf->num_vmdq_qps = 1; /* partition out the remaining vectors */ switch (vec) { case 2: pf->num_lan_msix = 1; break; case 3: if (pf->flags & I40E_FLAG_IWARP_ENABLED) { pf->num_lan_msix = 1; pf->num_iwarp_msix = 1; } else { pf->num_lan_msix = 2; } break; default: if (pf->flags & I40E_FLAG_IWARP_ENABLED) { pf->num_iwarp_msix = min_t(int, (vec / 3), iwarp_requested); pf->num_vmdq_vsis = min_t(int, (vec / 3), I40E_DEFAULT_NUM_VMDQ_VSI); } else { pf->num_vmdq_vsis = min_t(int, (vec / 2), I40E_DEFAULT_NUM_VMDQ_VSI); } if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { pf->num_fdsb_msix = 1; vec--; } pf->num_lan_msix = min_t(int, (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), pf->num_lan_msix); pf->num_lan_qps = pf->num_lan_msix; break; } } if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && (pf->num_fdsb_msix == 0)) { dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n"); pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; pf->flags |= I40E_FLAG_FD_SB_INACTIVE; } if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && (pf->num_vmdq_msix == 0)) { dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; } if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && (pf->num_iwarp_msix == 0)) { dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); pf->flags &= ~I40E_FLAG_IWARP_ENABLED; } i40e_debug(&pf->hw, I40E_DEBUG_INIT, "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", pf->num_lan_msix, pf->num_vmdq_msix * pf->num_vmdq_vsis, pf->num_fdsb_msix, pf->num_iwarp_msix); return v_actual; } /** * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector * @vsi: the VSI being configured * @v_idx: index of the vector in the vsi struct * * We allocate one q_vector. If allocation fails we return -ENOMEM. **/ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) { struct i40e_q_vector *q_vector; /* allocate q_vector */ q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); if (!q_vector) return -ENOMEM; q_vector->vsi = vsi; q_vector->v_idx = v_idx; cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); if (vsi->netdev) netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll); /* tie q_vector and vsi together */ vsi->q_vectors[v_idx] = q_vector; return 0; } /** * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors * @vsi: the VSI being configured * * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM. 
**/ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int err, v_idx, num_q_vectors; /* if not MSIX, give the one vector only to the LAN VSI */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) num_q_vectors = vsi->num_q_vectors; else if (vsi == pf->vsi[pf->lan_vsi]) num_q_vectors = 1; else return -EINVAL; for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { err = i40e_vsi_alloc_q_vector(vsi, v_idx); if (err) goto err_out; } return 0; err_out: while (v_idx--) i40e_free_q_vector(vsi, v_idx); return err; } /** * i40e_init_interrupt_scheme - Determine proper interrupt scheme * @pf: board private structure to initialize **/ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) { int vectors = 0; ssize_t size; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { vectors = i40e_init_msix(pf); if (vectors < 0) { pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_IWARP_ENABLED | I40E_FLAG_RSS_ENABLED | I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED | I40E_FLAG_SRIOV_ENABLED | I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_VMDQ_ENABLED); pf->flags |= I40E_FLAG_FD_SB_INACTIVE; /* rework the queue expectations without MSIX */ i40e_determine_queue_usage(pf); } } if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && (pf->flags & I40E_FLAG_MSI_ENABLED)) { dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); vectors = pci_enable_msi(pf->pdev); if (vectors < 0) { dev_info(&pf->pdev->dev, "MSI init failed - %d\n", vectors); pf->flags &= ~I40E_FLAG_MSI_ENABLED; } vectors = 1; /* one MSI or Legacy vector */ } if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); /* set up vector assignment tracking */ size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); pf->irq_pile = kzalloc(size, GFP_KERNEL); if (!pf->irq_pile) return -ENOMEM; pf->irq_pile->num_entries = vectors; /* track first vector for misc interrupts, ignore return */ (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); return 0; } /** * i40e_restore_interrupt_scheme - Restore the interrupt scheme * @pf: private board data structure * * Restore the interrupt scheme that was cleared when we suspended the * device. This should be called during resume to re-allocate the q_vectors * and reacquire IRQs. */ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) { int err, i; /* We cleared the MSI and MSI-X flags when disabling the old interrupt * scheme. We need to re-enable them here in order to attempt to * re-acquire the MSI or MSI-X vectors */ pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); err = i40e_init_interrupt_scheme(pf); if (err) return err; /* Now that we've re-acquired IRQs, we need to remap the vectors and * rings together again. */ for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i]) { err = i40e_vsi_alloc_q_vectors(pf->vsi[i]); if (err) goto err_unwind; i40e_vsi_map_rings_to_vectors(pf->vsi[i]); } } err = i40e_setup_misc_vector(pf); if (err) goto err_unwind; if (pf->flags & I40E_FLAG_IWARP_ENABLED) i40e_client_update_msix_info(pf); return 0; err_unwind: while (i--) { if (pf->vsi[i]) i40e_vsi_free_q_vectors(pf->vsi[i]); } return err; } /** * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle * non queue events in recovery mode * @pf: board private structure * * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
* This is handled differently from the non-recovery path since no Tx/Rx * resources are being allocated in recovery mode. **/ static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf) { int err; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { err = i40e_setup_misc_vector(pf); if (err) { dev_info(&pf->pdev->dev, "MSI-X misc vector request failed, error %d\n", err); return err; } } else { u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED; err = request_irq(pf->pdev->irq, i40e_intr, flags, pf->int_name, pf); if (err) { dev_info(&pf->pdev->dev, "MSI/legacy misc vector request failed, error %d\n", err); return err; } i40e_enable_misc_int_causes(pf); i40e_irq_dynamic_enable_icr0(pf); } return 0; } /** * i40e_setup_misc_vector - Setup the misc vector to handle non queue events * @pf: board private structure * * This sets up the handler for MSIX 0, which is used to manage the * non-queue interrupts, e.g. AdminQ and errors. This is not used * when in MSI or Legacy interrupt mode. **/ static int i40e_setup_misc_vector(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; int err = 0; /* Only request the IRQ once, the first time through. */ if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) { err = request_irq(pf->msix_entries[0].vector, i40e_intr, 0, pf->int_name, pf); if (err) { clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); dev_info(&pf->pdev->dev, "request_irq for %s failed: %d\n", pf->int_name, err); return -EFAULT; } } i40e_enable_misc_int_causes(pf); /* associate no queues to the misc vector */ wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1); i40e_flush(hw); i40e_irq_dynamic_enable_icr0(pf); return err; } /** * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands * @vsi: Pointer to vsi structure * @seed: Buffer to store the hash keys * @lut: Buffer to store the lookup table entries * @lut_size: Size of buffer to store the lookup table entries * * Return 0 on success, negative on failure */ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int ret = 0; if (seed) { ret = i40e_aq_get_rss_key(hw, vsi->id, (struct i40e_aqc_get_set_rss_key_data *)seed); if (ret) { dev_info(&pf->pdev->dev, "Cannot get RSS key, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; } } if (lut) { bool pf_lut = vsi->type == I40E_VSI_MAIN; ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); if (ret) { dev_info(&pf->pdev->dev, "Cannot get RSS lut, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; } } return ret; } /** * i40e_config_rss_reg - Configure RSS keys and lut by writing registers * @vsi: Pointer to vsi structure * @seed: RSS hash seed * @lut: Lookup table * @lut_size: Lookup table size * * Returns 0 on success, negative on failure **/ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, const u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u16 vf_id = vsi->vf_id; u8 i; /* Fill out hash function seed */ if (seed) { u32 *seed_dw = (u32 *)seed; if (vsi->type == I40E_VSI_MAIN) { for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); } else if (vsi->type == I40E_VSI_SRIOV) { for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++) wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]); } else { dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n"); }
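	/* (Editorial note: both the seed above and the LUT below are byte
	 * arrays cast to u32 and written one 32-bit register at a time, so
	 * each HLUT write packs four one-byte queue indexes; the number of
	 * writes comes from the corresponding *_MAX_INDEX limit.)
	 */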
} if (lut) { u32 *lut_dw = (u32 *)lut; if (vsi->type == I40E_VSI_MAIN) { if (lut_size != I40E_HLUT_ARRAY_SIZE) return -EINVAL; for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]); } else if (vsi->type == I40E_VSI_SRIOV) { if (lut_size != I40E_VF_HLUT_ARRAY_SIZE) return -EINVAL; for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]); } else { dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); } } i40e_flush(hw); return 0; } /** * i40e_get_rss_reg - Get the RSS keys and lut by reading registers * @vsi: Pointer to VSI structure * @seed: Buffer to store the keys * @lut: Buffer to store the lookup table entries * @lut_size: Size of buffer to store the lookup table entries * * Returns 0 on success, negative on failure */ static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u16 i; if (seed) { u32 *seed_dw = (u32 *)seed; for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); } if (lut) { u32 *lut_dw = (u32 *)lut; if (lut_size != I40E_HLUT_ARRAY_SIZE) return -EINVAL; for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i)); } return 0; } /** * i40e_config_rss - Configure RSS keys and lut * @vsi: Pointer to VSI structure * @seed: RSS hash seed * @lut: Lookup table * @lut_size: Lookup table size * * Returns 0 on success, negative on failure */ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) return i40e_config_rss_aq(vsi, seed, lut, lut_size); else return i40e_config_rss_reg(vsi, seed, lut, lut_size); } /** * i40e_get_rss - Get RSS keys and lut * @vsi: Pointer to VSI structure * @seed: Buffer to store the keys * @lut: Buffer to store the lookup table entries * @lut_size: Size of buffer to store the lookup table entries * * Returns 0 on success, negative on failure */ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) return i40e_get_rss_aq(vsi, seed, lut, lut_size); else return i40e_get_rss_reg(vsi, seed, lut, lut_size); } /** * i40e_fill_rss_lut - Fill the RSS lookup table with default values * @pf: Pointer to board private structure * @lut: Lookup table * @rss_table_size: Lookup table size * @rss_size: Range of queue number for hashing */ void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, u16 rss_table_size, u16 rss_size) { u16 i; for (i = 0; i < rss_table_size; i++) lut[i] = i % rss_size; } /** * i40e_pf_config_rss - Prepare for RSS if used * @pf: board private structure **/ static int i40e_pf_config_rss(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; u8 seed[I40E_HKEY_ARRAY_SIZE]; u8 *lut; struct i40e_hw *hw = &pf->hw; u32 reg_val; u64 hena; int ret; /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); hena |= i40e_pf_get_default_rss_hena(pf); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); /* Determine the RSS table size based on the hardware capabilities */ reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); reg_val = (pf->rss_table_size == 512) ? 
(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) : (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512); i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val); /* Determine the RSS size of the VSI */ if (!vsi->rss_size) { u16 qcount; /* If the firmware does something weird during VSI init, we * could end up with zero TCs. Check for that to avoid * divide-by-zero. It probably won't pass traffic, but it also * won't panic. */ qcount = vsi->num_queue_pairs / (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1); vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); } if (!vsi->rss_size) return -EINVAL; lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM; /* Use user configured lut if there is one, otherwise use default */ if (vsi->rss_lut_user) memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); else i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); /* Use user configured hash key if there is one, otherwise * use default. */ if (vsi->rss_hkey_user) memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); else netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); kfree(lut); return ret; } /** * i40e_reconfig_rss_queues - change number of queues for rss and rebuild * @pf: board private structure * @queue_count: the requested queue count for rss. * * returns 0 if rss is not enabled, if enabled returns the final rss queue * count which may be different from the requested queue count. * Note: expects to be called while under rtnl_lock() **/ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; int new_rss_size; if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) return 0; queue_count = min_t(int, queue_count, num_online_cpus()); new_rss_size = min_t(int, queue_count, pf->rss_size_max); if (queue_count != vsi->num_queue_pairs) { u16 qcount; vsi->req_queue_pairs = queue_count; i40e_prep_for_reset(pf); if (test_bit(__I40E_IN_REMOVE, pf->state)) return pf->alloc_rss_size; pf->alloc_rss_size = new_rss_size; i40e_reset_and_rebuild(pf, true, true); /* Discard the user configured hash keys and lut, if less * queues are enabled. 
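 *
 * (For illustration: a user LUT programmed while 16 queues were active
 *  may still map hash buckets to queues 8..15; once queue_count drops
 *  to 8 those entries would point at disabled queues, so the user key
 *  and LUT are dropped and a default LUT is regenerated below.)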
*/ if (queue_count < vsi->rss_size) { i40e_clear_rss_config_user(vsi); dev_dbg(&pf->pdev->dev, "discard user configured hash keys and lut\n"); } /* Reset vsi->rss_size, as number of enabled queues changed */ qcount = vsi->num_queue_pairs / vsi->tc_config.numtc; vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); i40e_pf_config_rss(pf); } dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n", vsi->req_queue_pairs, pf->rss_size_max); return pf->alloc_rss_size; } /** * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition * @pf: board private structure **/ int i40e_get_partition_bw_setting(struct i40e_pf *pf) { bool min_valid, max_valid; u32 max_bw, min_bw; int status; status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, &min_valid, &max_valid); if (!status) { if (min_valid) pf->min_bw = min_bw; if (max_valid) pf->max_bw = max_bw; } return status; } /** * i40e_set_partition_bw_setting - Set BW settings for this PF partition * @pf: board private structure **/ int i40e_set_partition_bw_setting(struct i40e_pf *pf) { struct i40e_aqc_configure_partition_bw_data bw_data; int status; memset(&bw_data, 0, sizeof(bw_data)); /* Set the valid bit for this PF */ bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK; bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK; /* Set the new bandwidths */ status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); return status; } /** * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition * @pf: board private structure **/ int i40e_commit_partition_bw_setting(struct i40e_pf *pf) { /* Commit temporary BW setting to permanent NVM image */ enum i40e_admin_queue_err last_aq_status; u16 nvm_word; int ret; if (pf->hw.partition_id != 1) { dev_info(&pf->pdev->dev, "Commit BW only works on partition 1! This is partition %d", pf->hw.partition_id); ret = -EOPNOTSUPP; goto bw_commit_out; } /* Acquire NVM for read access */ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); last_aq_status = pf->hw.aq.asq_last_status; if (ret) { dev_info(&pf->pdev->dev, "Cannot acquire NVM for read access, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } /* Read word 0x10 of NVM - SW compatibility word 1 */ ret = i40e_aq_read_nvm(&pf->hw, I40E_SR_NVM_CONTROL_WORD, 0x10, sizeof(nvm_word), &nvm_word, false, NULL); /* Save off last admin queue command status before releasing * the NVM */ last_aq_status = pf->hw.aq.asq_last_status; i40e_release_nvm(&pf->hw); if (ret) { dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } /* Wait a bit for NVM release to complete */ msleep(50); /* Acquire NVM for write access */ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); last_aq_status = pf->hw.aq.asq_last_status; if (ret) { dev_info(&pf->pdev->dev, "Cannot acquire NVM for write access, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } /* Write it back out unchanged to initiate update NVM, * which will force a write of the shadow (alt) RAM to * the NVM - thus storing the bandwidth values permanently. 
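 *
 * (Editorial summary of the sequence used here, for reference:
 *   1. acquire the NVM for READ, read SR word 0x10, release the NVM
 *   2. acquire the NVM for WRITE
 *   3. i40e_aq_update_nvm() the same word, unchanged, below
 *   4. release the NVM and report any failure)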
*/ ret = i40e_aq_update_nvm(&pf->hw, I40E_SR_NVM_CONTROL_WORD, 0x10, sizeof(nvm_word), &nvm_word, true, 0, NULL); /* Save off last admin queue command status before releasing * the NVM */ last_aq_status = pf->hw.aq.asq_last_status; i40e_release_nvm(&pf->hw); if (ret) dev_info(&pf->pdev->dev, "BW settings NOT SAVED, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, last_aq_status)); bw_commit_out: return ret; } /** * i40e_is_total_port_shutdown_enabled - read NVM and return value * if total port shutdown feature is enabled for this PF * @pf: board private structure **/ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf) { #define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4) #define I40E_FEATURES_ENABLE_PTR 0x2A #define I40E_CURRENT_SETTING_PTR 0x2B #define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D #define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1 #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0) #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4 u16 sr_emp_sr_settings_ptr = 0; u16 features_enable = 0; u16 link_behavior = 0; int read_status = 0; bool ret = false; read_status = i40e_read_nvm_word(&pf->hw, I40E_SR_EMP_SR_SETTINGS_PTR, &sr_emp_sr_settings_ptr); if (read_status) goto err_nvm; read_status = i40e_read_nvm_word(&pf->hw, sr_emp_sr_settings_ptr + I40E_FEATURES_ENABLE_PTR, &features_enable); if (read_status) goto err_nvm; if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) { read_status = i40e_read_nvm_module_data(&pf->hw, I40E_SR_EMP_SR_SETTINGS_PTR, I40E_CURRENT_SETTING_PTR, I40E_LINK_BEHAVIOR_WORD_OFFSET, I40E_LINK_BEHAVIOR_WORD_LENGTH, &link_behavior); if (read_status) goto err_nvm; link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH); ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior; } return ret; err_nvm: dev_warn(&pf->pdev->dev, "total-port-shutdown feature is off due to read nvm error: %pe\n", ERR_PTR(read_status)); return ret; } /** * i40e_sw_init - Initialize general software structures (struct i40e_pf) * @pf: board private structure to initialize * * i40e_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). 
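 *
 * (Hypothetical sizing example for the RSS setup below: with
 *  rss_table_entry_width = 9 the starting maximum is BIT(9) = 512;
 *  it is then clamped to num_tx_qp and to roundup_pow_of_two() of the
 *  online CPU count, so on a 12-CPU system with plenty of Tx queues
 *  rss_size_max becomes 16 and alloc_rss_size ends up min(16, 12) = 12.)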
**/ static int i40e_sw_init(struct i40e_pf *pf) { int err = 0; int size; u16 pow; /* Set default capability flags */ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | I40E_FLAG_MSI_ENABLED | I40E_FLAG_MSIX_ENABLED; /* Set default ITR */ pf->rx_itr_default = I40E_ITR_RX_DEF; pf->tx_itr_default = I40E_ITR_TX_DEF; /* Depending on PF configurations, it is possible that the RSS * maximum might end up larger than the available queues */ pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); pf->alloc_rss_size = 1; pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, pf->hw.func_caps.num_tx_qp); /* find the next higher power-of-2 of num cpus */ pow = roundup_pow_of_two(num_online_cpus()); pf->rss_size_max = min_t(int, pf->rss_size_max, pow); if (pf->hw.func_caps.rss) { pf->flags |= I40E_FLAG_RSS_ENABLED; pf->alloc_rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); } /* MFP mode enabled */ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { pf->flags |= I40E_FLAG_MFP_ENABLED; dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); if (i40e_get_partition_bw_setting(pf)) { dev_warn(&pf->pdev->dev, "Could not get partition bw settings\n"); } else { dev_info(&pf->pdev->dev, "Partition BW Min = %8.8x, Max = %8.8x\n", pf->min_bw, pf->max_bw); /* nudge the Tx scheduler */ i40e_set_partition_bw_setting(pf); } } if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || (pf->hw.func_caps.fd_filters_best_effort > 0)) { pf->flags |= I40E_FLAG_FD_ATR_ENABLED; pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; if (pf->flags & I40E_FLAG_MFP_ENABLED && pf->hw.num_partitions > 1) dev_info(&pf->pdev->dev, "Flow Director Sideband mode Disabled in MFP mode\n"); else pf->flags |= I40E_FLAG_FD_SB_ENABLED; pf->fdir_pf_filter_count = pf->hw.func_caps.fd_filters_guaranteed; pf->hw.fdir_shared_filter_count = pf->hw.func_caps.fd_filters_best_effort; } if (pf->hw.mac.type == I40E_MAC_X722) { pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE | I40E_HW_128_QP_RSS_CAPABLE | I40E_HW_ATR_EVICT_CAPABLE | I40E_HW_WB_ON_ITR_CAPABLE | I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE | I40E_HW_NO_PCI_LINK_CHECK | I40E_HW_USE_SET_LLDP_MIB | I40E_HW_GENEVE_OFFLOAD_CAPABLE | I40E_HW_PTP_L4_CAPABLE | I40E_HW_WOL_MC_MAGIC_PKT_WAKE | I40E_HW_OUTER_UDP_CSUM_CAPABLE); #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) != I40E_FDEVICT_PCTYPE_DEFAULT) { dev_warn(&pf->pdev->dev, "FD EVICT PCTYPES are not right, disable FD HW EVICT\n"); pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE; } } else if ((pf->hw.aq.api_maj_ver > 1) || ((pf->hw.aq.api_maj_ver == 1) && (pf->hw.aq.api_min_ver > 4))) { /* Supported in FW API version higher than 1.4 */ pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE; } /* Enable HW ATR eviction if possible */ if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE) pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4))) { pf->hw_features |= I40E_HW_RESTART_AUTONEG; /* No DCB support for FW < v4.33 */ pf->hw_features |= I40E_HW_NO_DCB_SUPPORT; } /* Disable FW LLDP if FW < v4.3 */ if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || (pf->hw.aq.fw_maj_ver < 4))) pf->hw_features |= I40E_HW_STOP_FW_LLDP; /* Use the FW Set LLDP MIB API if FW > v4.40 */ if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || (pf->hw.aq.fw_maj_ver >= 
5))) pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB; /* Enable PTP L4 if FW > v6.0 */ if (pf->hw.mac.type == I40E_MAC_XL710 && pf->hw.aq.fw_maj_ver >= 6) pf->hw_features |= I40E_HW_PTP_L4_CAPABLE; if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) { pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; pf->flags |= I40E_FLAG_VMDQ_ENABLED; pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); } if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) { pf->flags |= I40E_FLAG_IWARP_ENABLED; /* IWARP needs one extra vector for CQP just like MISC.*/ pf->num_iwarp_msix = (int)num_online_cpus() + 1; } /* Stopping FW LLDP engine is supported on XL710 and X722 * starting from FW versions determined in i40e_init_adminq. * Stopping the FW LLDP engine is not supported on XL710 * if NPAR is functioning so unset this hw flag in this case. */ if (pf->hw.mac.type == I40E_MAC_XL710 && pf->hw.func_caps.npar_enable && (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE; #ifdef CONFIG_PCI_IOV if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; pf->flags |= I40E_FLAG_SRIOV_ENABLED; pf->num_req_vfs = min_t(int, pf->hw.func_caps.num_vfs, I40E_MAX_VF_COUNT); } #endif /* CONFIG_PCI_IOV */ pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; /* By default FW has this off for performance reasons */ pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; /* set up queue assignment tracking */ size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); pf->qp_pile = kzalloc(size, GFP_KERNEL); if (!pf->qp_pile) { err = -ENOMEM; goto sw_init_done; } pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; pf->tx_timeout_recovery_level = 1; if (pf->hw.mac.type != I40E_MAC_X722 && i40e_is_total_port_shutdown_enabled(pf)) { /* Link down on close must be on when total port shutdown * is enabled for a given port */ pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED | I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED); dev_info(&pf->pdev->dev, "total-port-shutdown was enabled, link-down-on-close is forced on\n"); } mutex_init(&pf->switch_mutex); sw_init_done: return err; } /** * i40e_set_ntuple - set the ntuple feature flag and take action * @pf: board private structure to initialize * @features: the feature set that the stack is suggesting * * returns a bool to indicate if reset needs to happen **/ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) { bool need_reset = false; /* Check if Flow Director n-tuple support was enabled or disabled. If * the state changed, we need to reset. */ if (features & NETIF_F_NTUPLE) { /* Enable filters and mark for reset */ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) need_reset = true; /* enable FD_SB only if there is MSI-X vector and no cloud * filters exist */ if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) { pf->flags |= I40E_FLAG_FD_SB_ENABLED; pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; } } else { /* turn off filters, mark for reset and clear SW filter list */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { need_reset = true; i40e_fdir_filter_exit(pf); } pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state); pf->flags |= I40E_FLAG_FD_SB_INACTIVE; /* reset fd counters */ pf->fd_add_err = 0; pf->fd_atr_cnt = 0; /* if ATR was auto disabled it can be re-enabled. 
*/ if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); } return need_reset; } /** * i40e_clear_rss_lut - clear the rx hash lookup table * @vsi: the VSI being configured **/ static void i40e_clear_rss_lut(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u16 vf_id = vsi->vf_id; u8 i; if (vsi->type == I40E_VSI_MAIN) { for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) wr32(hw, I40E_PFQF_HLUT(i), 0); } else if (vsi->type == I40E_VSI_SRIOV) { for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0); } else { dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); } } /** * i40e_set_loopback - turn on/off loopback mode on underlying PF * @vsi: ptr to VSI * @ena: flag to indicate the on/off setting */ static int i40e_set_loopback(struct i40e_vsi *vsi, bool ena) { bool if_running = netif_running(vsi->netdev) && !test_and_set_bit(__I40E_VSI_DOWN, vsi->state); int ret; if (if_running) i40e_down(vsi); ret = i40e_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); if (ret) netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); if (if_running) i40e_up(vsi); return ret; } /** * i40e_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting * Note: expects to be called while under rtnl_lock() **/ static int i40e_set_features(struct net_device *netdev, netdev_features_t features) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; bool need_reset; if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) i40e_pf_config_rss(pf); else if (!(features & NETIF_F_RXHASH) && netdev->features & NETIF_F_RXHASH) i40e_clear_rss_lut(vsi); if (features & NETIF_F_HW_VLAN_CTAG_RX) i40e_vlan_stripping_enable(vsi); else i40e_vlan_stripping_disable(vsi); if (!(features & NETIF_F_HW_TC) && (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) { dev_err(&pf->pdev->dev, "Offloaded tc filters active, can't turn hw_tc_offload off"); return -EINVAL; } if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt) i40e_del_all_macvlans(vsi); need_reset = i40e_set_ntuple(pf, features); if (need_reset) i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); if ((features ^ netdev->features) & NETIF_F_LOOPBACK) return i40e_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK)); return 0; } static int i40e_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; u8 type, filter_index; int ret; type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? 
I40E_AQC_TUNNEL_TYPE_VXLAN : I40E_AQC_TUNNEL_TYPE_NGE; ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index, NULL); if (ret) { netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return -EIO; } udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index); return 0; } static int i40e_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; int ret; ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL); if (ret) { netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return -EIO; } return 0; } static int i40e_get_phys_port_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; if (!(pf->hw_features & I40E_HW_PORT_ID_VALID)) return -EOPNOTSUPP; ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); memcpy(ppid->id, hw->mac.port_addr, ppid->id_len); return 0; } /** * i40e_ndo_fdb_add - add an entry to the hardware database * @ndm: the input from the stack * @tb: pointer to array of nladdr (unused) * @dev: the net device pointer * @addr: the MAC address entry being added * @vid: VLAN ID * @flags: instructions from stack about fdb operation * @extack: netlink extended ack, unused currently */ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, u16 flags, struct netlink_ext_ack *extack) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_pf *pf = np->vsi->back; int err = 0; if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) return -EOPNOTSUPP; if (vid) { pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name); return -EINVAL; } /* Hardware does not support aging addresses so if a * ndm_state is given only allow permanent addresses */ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { netdev_info(dev, "FDB only supports static addresses\n"); return -EINVAL; } if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) err = dev_uc_add_excl(dev, addr); else if (is_multicast_ether_addr(addr)) err = dev_mc_add_excl(dev, addr); else err = -EINVAL; /* Only return duplicate errors if NLM_F_EXCL is set */ if (err == -EEXIST && !(flags & NLM_F_EXCL)) err = 0; return err; } /** * i40e_ndo_bridge_setlink - Set the hardware bridge mode * @dev: the netdev being configured * @nlh: RTNL message * @flags: bridge flags * @extack: netlink extended ack * * Inserts a new hardware bridge if not already created and * enables the bridging mode requested (VEB or VEPA). If the * hardware bridge has already been inserted and the request * is to change the mode then that requires a PF reset to * allow rebuild of the components with required hardware * bridge mode enabled. 
* * Note: expects to be called while under rtnl_lock() **/ static int i40e_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags, struct netlink_ext_ack *extack) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_veb *veb = NULL; struct nlattr *attr, *br_spec; int i, rem; /* Only for PF VSI for now */ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) return -EOPNOTSUPP; /* Find the HW bridge for PF VSI */ for (i = 0; i < I40E_MAX_VEB && !veb; i++) { if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) veb = pf->veb[i]; } br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); if (!br_spec) return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { __u16 mode; if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; mode = nla_get_u16(attr); if ((mode != BRIDGE_MODE_VEPA) && (mode != BRIDGE_MODE_VEB)) return -EINVAL; /* Insert a new HW bridge */ if (!veb) { veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, vsi->tc_config.enabled_tc); if (veb) { veb->bridge_mode = mode; i40e_config_bridge_mode(veb); } else { /* No Bridge HW offload available */ return -ENOENT; } break; } else if (mode != veb->bridge_mode) { /* Existing HW bridge but different mode needs reset */ veb->bridge_mode = mode; /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */ if (mode == BRIDGE_MODE_VEB) pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; else pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); break; } } return 0; } /** * i40e_ndo_bridge_getlink - Get the hardware bridge mode * @skb: skb buff * @pid: process id * @seq: RTNL message seq # * @dev: the netdev being configured * @filter_mask: unused * @nlflags: netlink flags passed in * * Return the mode in which the hardware bridge is operating in * i.e VEB or VEPA. **/ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 __always_unused filter_mask, int nlflags) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_veb *veb = NULL; int i; /* Only for PF VSI for now */ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) return -EOPNOTSUPP; /* Find the HW bridge for the PF VSI */ for (i = 0; i < I40E_MAX_VEB && !veb; i++) { if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) veb = pf->veb[i]; } if (!veb) return 0; return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, 0, 0, nlflags, filter_mask, NULL); } /** * i40e_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff * @dev: This physical port's netdev * @features: Offload features that the stack believes apply **/ static netdev_features_t i40e_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { size_t len; /* No point in doing any of this if neither checksum nor GSO are * being requested for this frame. We can rule out both by just * checking for CHECKSUM_PARTIAL */ if (skb->ip_summed != CHECKSUM_PARTIAL) return features; /* We cannot support GSO if the MSS is going to be less than * 64 bytes. If it is then we need to drop support for GSO. 
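 *
 * (Editorial note on the header checks below: the masks encode the
 *  descriptor field limits, e.g. len & ~(63 * 2) only passes when the
 *  L2 header is an even length no larger than 126 bytes, and
 *  len & ~(127 * 4) only passes when the IP header length is a
 *  multiple of 4 no larger than 508 bytes.)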
*/ if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) features &= ~NETIF_F_GSO_MASK; /* MACLEN can support at most 63 words */ len = skb_network_header(skb) - skb->data; if (len & ~(63 * 2)) goto out_err; /* IPLEN and EIPLEN can support at most 127 dwords */ len = skb_transport_header(skb) - skb_network_header(skb); if (len & ~(127 * 4)) goto out_err; if (skb->encapsulation) { /* L4TUNLEN can support 127 words */ len = skb_inner_network_header(skb) - skb_transport_header(skb); if (len & ~(127 * 2)) goto out_err; /* IPLEN can support at most 127 dwords */ len = skb_inner_transport_header(skb) - skb_inner_network_header(skb); if (len & ~(127 * 4)) goto out_err; } /* No need to validate L4LEN as TCP is the only protocol with a * flexible value and we support all possible values supported * by TCP, which is at most 15 dwords */ return features; out_err: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } /** * i40e_xdp_setup - add/remove an XDP program * @vsi: VSI to changed * @prog: XDP program * @extack: netlink extended ack **/ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, struct netlink_ext_ack *extack) { int frame_size = i40e_max_vsi_frame_size(vsi, prog); struct i40e_pf *pf = vsi->back; struct bpf_prog *old_prog; bool need_reset; int i; /* Don't allow frames that span over multiple buffers */ if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) { NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags"); return -EINVAL; } /* When turning XDP on->off/off->on we reset and rebuild the rings. */ need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog); if (need_reset) i40e_prep_for_reset(pf); /* VSI shall be deleted in a moment, just return EINVAL */ if (test_bit(__I40E_IN_REMOVE, pf->state)) return -EINVAL; old_prog = xchg(&vsi->xdp_prog, prog); if (need_reset) { if (!prog) { xdp_features_clear_redirect_target(vsi->netdev); /* Wait until ndo_xsk_wakeup completes. */ synchronize_rcu(); } i40e_reset_and_rebuild(pf, true, true); } if (!i40e_enabled_xdp_vsi(vsi) && prog) { if (i40e_realloc_rx_bi_zc(vsi, true)) return -ENOMEM; } else if (i40e_enabled_xdp_vsi(vsi) && !prog) { if (i40e_realloc_rx_bi_zc(vsi, false)) return -ENOMEM; } for (i = 0; i < vsi->num_queue_pairs; i++) WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); if (old_prog) bpf_prog_put(old_prog); /* Kick start the NAPI context if there is an AF_XDP socket open * on that queue id. This so that receiving will start. */ if (need_reset && prog) { for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->xdp_rings[i]->xsk_pool) (void)i40e_xsk_wakeup(vsi->netdev, i, XDP_WAKEUP_RX); xdp_features_set_redirect_target(vsi->netdev, true); } return 0; } /** * i40e_enter_busy_conf - Enters busy config state * @vsi: vsi * * Returns 0 on success, <0 for failure. 
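 *
 * Polls the __I40E_CONFIG_BUSY bit up to 50 times, sleeping 1-2 ms
 * between attempts, and gives up with -EBUSY if the bit never clears.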
**/ static int i40e_enter_busy_conf(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int timeout = 50; while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { timeout--; if (!timeout) return -EBUSY; usleep_range(1000, 2000); } return 0; } /** * i40e_exit_busy_conf - Exits busy config state * @vsi: vsi **/ static void i40e_exit_busy_conf(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; clear_bit(__I40E_CONFIG_BUSY, pf->state); } /** * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair * @vsi: vsi * @queue_pair: queue pair **/ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair) { memset(&vsi->rx_rings[queue_pair]->rx_stats, 0, sizeof(vsi->rx_rings[queue_pair]->rx_stats)); memset(&vsi->tx_rings[queue_pair]->stats, 0, sizeof(vsi->tx_rings[queue_pair]->stats)); if (i40e_enabled_xdp_vsi(vsi)) { memset(&vsi->xdp_rings[queue_pair]->stats, 0, sizeof(vsi->xdp_rings[queue_pair]->stats)); } } /** * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair * @vsi: vsi * @queue_pair: queue pair **/ static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) { i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); if (i40e_enabled_xdp_vsi(vsi)) { /* Make sure that in-progress ndo_xdp_xmit calls are * completed. */ synchronize_rcu(); i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); } i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); } /** * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair * @vsi: vsi * @queue_pair: queue pair * @enable: true for enable, false for disable **/ static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair, bool enable) { struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; struct i40e_q_vector *q_vector = rxr->q_vector; if (!vsi->netdev) return; /* All rings in a qp belong to the same qvector. */ if (q_vector->rx.ring || q_vector->tx.ring) { if (enable) napi_enable(&q_vector->napi); else napi_disable(&q_vector->napi); } } /** * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair * @vsi: vsi * @queue_pair: queue pair * @enable: true for enable, false for disable * * Returns 0 on success, <0 on failure. **/ static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair, bool enable) { struct i40e_pf *pf = vsi->back; int pf_q, ret = 0; pf_q = vsi->base_queue + queue_pair; ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, false /*is xdp*/, enable); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d Tx ring %d %sable timeout\n", vsi->seid, pf_q, (enable ? "en" : "dis")); return ret; } i40e_control_rx_q(pf, pf_q, enable); ret = i40e_pf_rxq_wait(pf, pf_q, enable); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d Rx ring %d %sable timeout\n", vsi->seid, pf_q, (enable ? "en" : "dis")); return ret; } /* Due to HW errata, on Rx disable only, the register can * indicate done before it really is. Needs 50ms to be sure */ if (!enable) mdelay(50); if (!i40e_enabled_xdp_vsi(vsi)) return ret; ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q + vsi->alloc_queue_pairs, true /*is xdp*/, enable); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d XDP Tx ring %d %sable timeout\n", vsi->seid, pf_q, (enable ? 
"en" : "dis")); } return ret; } /** * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair * @vsi: vsi * @queue_pair: queue_pair **/ static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair) { struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; /* All rings in a qp belong to the same qvector. */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx); else i40e_irq_dynamic_enable_icr0(pf); i40e_flush(hw); } /** * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair * @vsi: vsi * @queue_pair: queue_pair **/ static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair) { struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; /* For simplicity, instead of removing the qp interrupt causes * from the interrupt linked list, we simply disable the interrupt, and * leave the list intact. * * All rings in a qp belong to the same qvector. */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) { u32 intpf = vsi->base_vector + rxr->q_vector->v_idx; wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0); i40e_flush(hw); synchronize_irq(pf->msix_entries[intpf].vector); } else { /* Legacy and MSI mode - this stops all interrupt handling */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); wr32(hw, I40E_PFINT_DYN_CTL0, 0); i40e_flush(hw); synchronize_irq(pf->pdev->irq); } } /** * i40e_queue_pair_disable - Disables a queue pair * @vsi: vsi * @queue_pair: queue pair * * Returns 0 on success, <0 on failure. **/ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair) { int err; err = i40e_enter_busy_conf(vsi); if (err) return err; i40e_queue_pair_disable_irq(vsi, queue_pair); err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); i40e_queue_pair_clean_rings(vsi, queue_pair); i40e_queue_pair_reset_stats(vsi, queue_pair); return err; } /** * i40e_queue_pair_enable - Enables a queue pair * @vsi: vsi * @queue_pair: queue pair * * Returns 0 on success, <0 on failure. 
**/ int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair) { int err; err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]); if (err) return err; if (i40e_enabled_xdp_vsi(vsi)) { err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]); if (err) return err; } err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]); if (err) return err; err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */); i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */); i40e_queue_pair_enable_irq(vsi, queue_pair); i40e_exit_busy_conf(vsi); return err; } /** * i40e_xdp - implements ndo_bpf for i40e * @dev: netdevice * @xdp: XDP command **/ static int i40e_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; if (vsi->type != I40E_VSI_MAIN) return -EINVAL; switch (xdp->command) { case XDP_SETUP_PROG: return i40e_xdp_setup(vsi, xdp->prog, xdp->extack); case XDP_SETUP_XSK_POOL: return i40e_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id); default: return -EINVAL; } } static const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, .ndo_stop = i40e_close, .ndo_start_xmit = i40e_lan_xmit_frame, .ndo_get_stats64 = i40e_get_netdev_stats_struct, .ndo_set_rx_mode = i40e_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = i40e_set_mac, .ndo_change_mtu = i40e_change_mtu, .ndo_eth_ioctl = i40e_ioctl, .ndo_tx_timeout = i40e_tx_timeout, .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = i40e_netpoll, #endif .ndo_setup_tc = __i40e_setup_tc, .ndo_select_queue = i40e_lan_select_queue, .ndo_set_features = i40e_set_features, .ndo_set_vf_mac = i40e_ndo_set_vf_mac, .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, .ndo_get_vf_stats = i40e_get_vf_stats, .ndo_set_vf_rate = i40e_ndo_set_vf_bw, .ndo_get_vf_config = i40e_ndo_get_vf_config, .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, .ndo_set_vf_trust = i40e_ndo_set_vf_trust, .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, .ndo_features_check = i40e_features_check, .ndo_bridge_getlink = i40e_ndo_bridge_getlink, .ndo_bridge_setlink = i40e_ndo_bridge_setlink, .ndo_bpf = i40e_xdp, .ndo_xdp_xmit = i40e_xdp_xmit, .ndo_xsk_wakeup = i40e_xsk_wakeup, .ndo_dfwd_add_station = i40e_fwd_add, .ndo_dfwd_del_station = i40e_fwd_del, }; /** * i40e_config_netdev - Setup the netdev flags * @vsi: the VSI being configured * * Returns 0 on success, negative value on failure **/ static int i40e_config_netdev(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_netdev_priv *np; struct net_device *netdev; u8 broadcast[ETH_ALEN]; u8 mac_addr[ETH_ALEN]; int etherdev_size; netdev_features_t hw_enc_features; netdev_features_t hw_features; etherdev_size = sizeof(struct i40e_netdev_priv); netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); if (!netdev) return -ENOMEM; vsi->netdev = netdev; np = netdev_priv(netdev); np->vsi = vsi; hw_enc_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_SOFT_FEATURES | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_PARTIAL | NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_UDP_L4 | NETIF_F_SCTP_CRC | NETIF_F_RXHASH | NETIF_F_RXCSUM | 0; if (!(pf->hw_features & 
I40E_HW_OUTER_UDP_CSUM_CAPABLE)) netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic; netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; netdev->hw_enc_features |= hw_enc_features; /* record features VLANs can make use of */ netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; #define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ NETIF_F_GSO_GRE_CSUM | \ NETIF_F_GSO_IPXIP4 | \ NETIF_F_GSO_IPXIP6 | \ NETIF_F_GSO_UDP_TUNNEL | \ NETIF_F_GSO_UDP_TUNNEL_CSUM) netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES; netdev->features |= NETIF_F_GSO_PARTIAL | I40E_GSO_PARTIAL_FEATURES; netdev->mpls_features |= NETIF_F_SG; netdev->mpls_features |= NETIF_F_HW_CSUM; netdev->mpls_features |= NETIF_F_TSO; netdev->mpls_features |= NETIF_F_TSO6; netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES; /* enable macvlan offloads */ netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; hw_features = hw_enc_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; netdev->hw_features |= hw_features | NETIF_F_LOOPBACK; netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; netdev->features &= ~NETIF_F_HW_TC; if (vsi->type == I40E_VSI_MAIN) { SET_NETDEV_DEV(netdev, &pf->pdev->dev); ether_addr_copy(mac_addr, hw->mac.perm_addr); /* The following steps are necessary for two reasons. First, * some older NVM configurations load a default MAC-VLAN * filter that will accept any tagged packet, and we want to * replace this with a normal filter. Additionally, it is * possible our MAC address was provided by the platform using * Open Firmware or similar. * * Thus, we need to remove the default filter and install one * specific to the MAC address. */ i40e_rm_default_mac_filter(vsi, mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_add_mac_filter(vsi, mac_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_XSK_ZEROCOPY | NETDEV_XDP_ACT_RX_SG; netdev->xdp_zc_max_segs = I40E_MAX_BUFFER_TXD; } else { /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to * the end, which is 4 bytes long, so force truncation of the * original name by IFNAMSIZ - 4 */ snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", IFNAMSIZ - 4, pf->vsi[pf->lan_vsi]->netdev->name); eth_random_addr(mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_add_mac_filter(vsi, mac_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); } /* Add the broadcast filter so that we initially will receive * broadcast packets. Note that when a new VLAN is first added the * driver will convert all filters marked I40E_VLAN_ANY into VLAN * specific filters as part of transitioning into "vlan" operation. * When more VLANs are added, the driver will copy each existing MAC * filter and add it for the new VLAN. * * Broadcast filters are handled specially by * i40e_sync_filters_subtask, as the driver must to set the broadcast * promiscuous bit instead of adding this directly as a MAC/VLAN * filter. The subtask will update the correct broadcast promiscuous * bits as VLANs become active or inactive. 
 */
	eth_broadcast_addr(broadcast);
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_add_mac_filter(vsi, broadcast);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	eth_hw_addr_set(netdev, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);

	/* i40iw_net_event() reads 16 bytes from neigh->primary_key */
	netdev->neigh_priv_len = sizeof(u32) * 4;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);

	/* MTU range: 68 - 9706 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;

	return 0;
}

/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 *
 * Removing the default (PF LAN) VSI is not allowed; any other VSI is
 * deleted from the switch with the delete-element AQ command.
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* removing the default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}

/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if the HW bridge mode is VEB, 0 if it is VEPA, and -ENOENT if
 * the uplink index points at a missing VEB.
 **/
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
{
	struct i40e_veb *veb;
	struct i40e_pf *pf = vsi->back;

	/* Uplink is not a bridge so default to VEB */
	if (vsi->veb_idx >= I40E_MAX_VEB)
		return 1;

	veb = pf->veb[vsi->veb_idx];
	if (!veb) {
		dev_info(&pf->pdev->dev,
			 "There is no veb associated with the bridge\n");
		return -ENOENT;
	}

	/* Uplink is a bridge in VEPA mode */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		return 0;

	/* Uplink is a bridge in VEB mode */
	return 1;
}

/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;
	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get PF vsi config, err %pe aq_err %s\n",
				 ERR_PTR(ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENOENT;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* Source pruning is enabled by default, so the flag is
		 * negative logic - if it's set, we need to fiddle with
		 * the VSI to disable source pruning.
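		 * Pruning is disabled by setting the LOCAL_LB flag in the
		 * VSI's switch_id via an update-VSI-parameters AQ command.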
*/ if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) { memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB); ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "update vsi failed, err %d aq_err %s\n", ret, i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } } /* MFP mode setup queue map and update VSI */ if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "update vsi failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } /* update the local VSI info queue map */ i40e_vsi_update_queue_map(vsi, &ctxt); vsi->info.valid_sections = 0; } else { /* Default/Main VSI is only enabled for TC0 * reconfigure it to enable all TCs that are * available on the port in SFP mode. * For MFP case the iSCSI PF would use this * flow to enable LAN+iSCSI TC. */ ret = i40e_vsi_config_tc(vsi, enabled_tc); if (ret) { /* Single TC condition is not fatal, * message and continue */ dev_info(&pf->pdev->dev, "failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n", enabled_tc, ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } break; case I40E_VSI_FDIR: ctxt.pf_num = hw->pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_PF; if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && (i40e_is_vsi_uplink_mode_veb(vsi))) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; case I40E_VSI_VMDQ2: ctxt.pf_num = hw->pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; /* This VSI is connected to VEB so the switch_id * should be set to zero by default. */ if (i40e_is_vsi_uplink_mode_veb(vsi)) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } /* Setup the VSI tx/rx queue map for TC0 only for now */ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; case I40E_VSI_SRIOV: ctxt.pf_num = hw->pf_id; ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_VF; /* This VSI is connected to VEB so the switch_id * should be set to zero by default. 
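	 * When the uplink really is a VEB, loopback is allowed instead by
	 * setting the ALLOW_LB flag in switch_id.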
*/ if (i40e_is_vsi_uplink_mode_veb(vsi)) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= (I40E_AQ_VSI_QUE_OPT_TCP_ENA | I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI); } ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; if (pf->vf[vsi->vf_id].spoofchk) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); } /* Setup the VSI tx/rx queue map for TC0 only for now */ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; case I40E_VSI_IWARP: /* send down message to iWARP */ break; default: return -ENODEV; } if (vsi->type != I40E_VSI_MAIN) { ret = i40e_aq_add_vsi(hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "add vsi failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } vsi->info = ctxt.info; vsi->info.valid_sections = 0; vsi->seid = ctxt.seid; vsi->id = ctxt.vsi_number; } spin_lock_bh(&vsi->mac_filter_hash_lock); vsi->active_filters = 0; /* If macvlan filters already exist, force them to get loaded */ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { f->state = I40E_FILTER_NEW; f_count++; } spin_unlock_bh(&vsi->mac_filter_hash_lock); clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); if (f_count) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); } /* Update VSI BW information */ ret = i40e_vsi_get_bw_info(vsi); if (ret) { dev_info(&pf->pdev->dev, "couldn't get vsi bw info, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* VSI is already added so not tearing that up */ ret = 0; } err: return ret; } /** * i40e_vsi_release - Delete a VSI and free its resources * @vsi: the VSI being removed * * Returns 0 on success or < 0 on error **/ int i40e_vsi_release(struct i40e_vsi *vsi) { struct i40e_mac_filter *f; struct hlist_node *h; struct i40e_veb *veb = NULL; struct i40e_pf *pf; u16 uplink_seid; int i, n, bkt; pf = vsi->back; /* release of a VEB-owner or last VSI is not allowed */ if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", vsi->seid, vsi->uplink_seid); return -ENODEV; } if (vsi == pf->vsi[pf->lan_vsi] && !test_bit(__I40E_DOWN, pf->state)) { dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); return -ENODEV; } set_bit(__I40E_VSI_RELEASING, vsi->state); uplink_seid = vsi->uplink_seid; if (vsi->type != I40E_VSI_SRIOV) { if (vsi->netdev_registered) { vsi->netdev_registered = false; if (vsi->netdev) { /* results in a call to i40e_close() */ unregister_netdev(vsi->netdev); } } else { i40e_vsi_close(vsi); } i40e_vsi_disable_irq(vsi); } spin_lock_bh(&vsi->mac_filter_hash_lock); /* clear the sync flag on all filters */ if (vsi->netdev) { __dev_uc_unsync(vsi->netdev, NULL); __dev_mc_unsync(vsi->netdev, NULL); } /* make sure any remaining filters are marked for deletion */ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) __i40e_del_filter(vsi, f); spin_unlock_bh(&vsi->mac_filter_hash_lock); i40e_sync_vsi_filters(vsi); i40e_vsi_delete(vsi); i40e_vsi_free_q_vectors(vsi); if (vsi->netdev) { free_netdev(vsi->netdev); vsi->netdev = NULL; 
} i40e_vsi_clear_rings(vsi); i40e_vsi_clear(vsi); /* If this was the last thing on the VEB, except for the * controlling VSI, remove the VEB, which puts the controlling * VSI onto the next level down in the switch. * * Well, okay, there's one more exception here: don't remove * the orphan VEBs yet. We'll wait for an explicit remove request * from up the network stack. */ for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->uplink_seid == uplink_seid && (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { n++; /* count the VSIs */ } } for (i = 0; i < I40E_MAX_VEB; i++) { if (!pf->veb[i]) continue; if (pf->veb[i]->uplink_seid == uplink_seid) n++; /* count the VEBs */ if (pf->veb[i]->seid == uplink_seid) veb = pf->veb[i]; } if (n == 0 && veb && veb->uplink_seid != 0) i40e_veb_release(veb); return 0; } /** * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI * @vsi: ptr to the VSI * * This should only be called after i40e_vsi_mem_alloc() which allocates the * corresponding SW VSI structure and initializes num_queue_pairs for the * newly allocated VSI. * * Returns 0 on success or negative on failure **/ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) { int ret = -ENOENT; struct i40e_pf *pf = vsi->back; if (vsi->q_vectors[0]) { dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", vsi->seid); return -EEXIST; } if (vsi->base_vector) { dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", vsi->seid, vsi->base_vector); return -EEXIST; } ret = i40e_vsi_alloc_q_vectors(vsi); if (ret) { dev_info(&pf->pdev->dev, "failed to allocate %d q_vector for VSI %d, ret=%d\n", vsi->num_q_vectors, vsi->seid, ret); vsi->num_q_vectors = 0; goto vector_setup_out; } /* In Legacy mode, we do not have to get any other vector since we * piggyback on the misc/ICR0 for queue interrupts. */ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) return ret; if (vsi->num_q_vectors) vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, vsi->num_q_vectors, vsi->idx); if (vsi->base_vector < 0) { dev_info(&pf->pdev->dev, "failed to get tracking for %d vectors for VSI %d, err=%d\n", vsi->num_q_vectors, vsi->seid, vsi->base_vector); i40e_vsi_free_q_vectors(vsi); ret = -ENOENT; goto vector_setup_out; } vector_setup_out: return ret; } /** * i40e_vsi_reinit_setup - return and reallocate resources for a VSI * @vsi: pointer to the vsi. * * This re-allocates a vsi's queue resources. * * Returns pointer to the successfully allocated and configured VSI sw struct * on success, otherwise returns NULL on failure. **/ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) { u16 alloc_queue_pairs; struct i40e_pf *pf; u8 enabled_tc; int ret; if (!vsi) return NULL; pf = vsi->back; i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_vsi_clear_rings(vsi); i40e_vsi_free_arrays(vsi, false); i40e_set_num_rings_in_vsi(vsi); ret = i40e_vsi_alloc_arrays(vsi, false); if (ret) goto err_vsi; alloc_queue_pairs = vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); if (ret < 0) { dev_info(&pf->pdev->dev, "failed to get tracking for %d queues for VSI %d err %d\n", alloc_queue_pairs, vsi->seid, ret); goto err_vsi; } vsi->base_queue = ret; /* Update the FW view of the VSI. Force a reset of TC and queue * layout configurations. 
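	 * This is done by clearing the main VSI's enabled_tc and then
	 * re-applying the saved TC map via i40e_vsi_config_tc().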
*/ enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); if (vsi->type == I40E_VSI_MAIN) i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); /* assign it some queues */ ret = i40e_alloc_rings(vsi); if (ret) goto err_rings; /* map all of the rings to the q_vectors */ i40e_vsi_map_rings_to_vectors(vsi); return vsi; err_rings: i40e_vsi_free_q_vectors(vsi); if (vsi->netdev_registered) { vsi->netdev_registered = false; unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); vsi->netdev = NULL; } i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); err_vsi: i40e_vsi_clear(vsi); return NULL; } /** * i40e_vsi_setup - Set up a VSI by a given type * @pf: board private structure * @type: VSI type * @uplink_seid: the switch element to link to * @param1: usage depends upon VSI type. For VF types, indicates VF id * * This allocates the sw VSI structure and its queue resources, then add a VSI * to the identified VEB. * * Returns pointer to the successfully allocated and configure VSI sw struct on * success, otherwise returns NULL on failure. **/ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, u16 uplink_seid, u32 param1) { struct i40e_vsi *vsi = NULL; struct i40e_veb *veb = NULL; u16 alloc_queue_pairs; int ret, i; int v_idx; /* The requested uplink_seid must be either * - the PF's port seid * no VEB is needed because this is the PF * or this is a Flow Director special case VSI * - seid of an existing VEB * - seid of a VSI that owns an existing VEB * - seid of a VSI that doesn't own a VEB * a new VEB is created and the VSI becomes the owner * - seid of the PF VSI, which is what creates the first VEB * this is a special case of the previous * * Find which uplink_seid we were given and create a new VEB if needed */ for (i = 0; i < I40E_MAX_VEB; i++) { if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { veb = pf->veb[i]; break; } } if (!veb && uplink_seid != pf->mac_seid) { for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { vsi = pf->vsi[i]; break; } } if (!vsi) { dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", uplink_seid); return NULL; } if (vsi->uplink_seid == pf->mac_seid) veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, vsi->tc_config.enabled_tc); else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, vsi->tc_config.enabled_tc); if (veb) { if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { dev_info(&vsi->back->pdev->dev, "New VSI creation error, uplink seid of LAN VSI expected.\n"); return NULL; } /* We come up by default in VEPA mode if SRIOV is not * already enabled, in which case we can't force VEPA * mode. */ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { veb->bridge_mode = BRIDGE_MODE_VEPA; pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; } i40e_config_bridge_mode(veb); } for (i = 0; i < I40E_MAX_VEB && !veb; i++) { if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) veb = pf->veb[i]; } if (!veb) { dev_info(&pf->pdev->dev, "couldn't add VEB\n"); return NULL; } vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; uplink_seid = veb->seid; } /* get vsi sw struct */ v_idx = i40e_vsi_mem_alloc(pf, type); if (v_idx < 0) goto err_alloc; vsi = pf->vsi[v_idx]; if (!vsi) goto err_alloc; vsi->type = type; vsi->veb_idx = (veb ? 
veb->idx : I40E_NO_VEB); if (type == I40E_VSI_MAIN) pf->lan_vsi = v_idx; else if (type == I40E_VSI_SRIOV) vsi->vf_id = param1; /* assign it some queues */ alloc_queue_pairs = vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); if (ret < 0) { dev_info(&pf->pdev->dev, "failed to get tracking for %d queues for VSI %d err=%d\n", alloc_queue_pairs, vsi->seid, ret); goto err_vsi; } vsi->base_queue = ret; /* get a VSI from the hardware */ vsi->uplink_seid = uplink_seid; ret = i40e_add_vsi(vsi); if (ret) goto err_vsi; switch (vsi->type) { /* setup the netdev if needed */ case I40E_VSI_MAIN: case I40E_VSI_VMDQ2: ret = i40e_config_netdev(vsi); if (ret) goto err_netdev; ret = i40e_netif_set_realnum_tx_rx_queues(vsi); if (ret) goto err_netdev; ret = register_netdev(vsi->netdev); if (ret) goto err_netdev; vsi->netdev_registered = true; netif_carrier_off(vsi->netdev); #ifdef CONFIG_I40E_DCB /* Setup DCB netlink interface */ i40e_dcbnl_setup(vsi); #endif /* CONFIG_I40E_DCB */ fallthrough; case I40E_VSI_FDIR: /* set up vectors and rings if needed */ ret = i40e_vsi_setup_vectors(vsi); if (ret) goto err_msix; ret = i40e_alloc_rings(vsi); if (ret) goto err_rings; /* map all of the rings to the q_vectors */ i40e_vsi_map_rings_to_vectors(vsi); i40e_vsi_reset_stats(vsi); break; default: /* no netdev or rings for the other VSI types */ break; } if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && (vsi->type == I40E_VSI_VMDQ2)) { ret = i40e_vsi_config_rss(vsi); } return vsi; err_rings: i40e_vsi_free_q_vectors(vsi); err_msix: if (vsi->netdev_registered) { vsi->netdev_registered = false; unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); vsi->netdev = NULL; } err_netdev: i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); err_vsi: i40e_vsi_clear(vsi); err_alloc: return NULL; } /** * i40e_veb_get_bw_info - Query VEB BW information * @veb: the veb to query * * Query the Tx scheduler BW configuration data for given VEB **/ static int i40e_veb_get_bw_info(struct i40e_veb *veb) { struct i40e_aqc_query_switching_comp_ets_config_resp ets_data; struct i40e_aqc_query_switching_comp_bw_config_resp bw_data; struct i40e_pf *pf = veb->pf; struct i40e_hw *hw = &pf->hw; u32 tc_bw_max; int ret = 0; int i; ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, "query veb bw config failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); goto out; } ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, &ets_data, NULL); if (ret) { dev_info(&pf->pdev->dev, "query veb bw ets config failed, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); goto out; } veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); veb->bw_max_quanta = ets_data.tc_bw_max; veb->is_abs_credits = bw_data.absolute_credits_enable; veb->enabled_tc = ets_data.tc_valid_bits; tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | (le16_to_cpu(bw_data.tc_bw_max[1]) << 16); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; veb->bw_tc_limit_credits[i] = le16_to_cpu(bw_data.tc_bw_limits[i]); veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); } out: return ret; } /** * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF * @pf: board private structure * * On error: returns error code (negative) * On success: returns vsi index in PF (positive) **/ static int i40e_veb_mem_alloc(struct i40e_pf *pf) { int 
ret = -ENOENT; struct i40e_veb *veb; int i; /* Need to protect the allocation of switch elements at the PF level */ mutex_lock(&pf->switch_mutex); /* VEB list may be fragmented if VEB creation/destruction has * been happening. We can afford to do a quick scan to look * for any free slots in the list. * * find next empty veb slot, looping back around if necessary */ i = 0; while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) i++; if (i >= I40E_MAX_VEB) { ret = -ENOMEM; goto err_alloc_veb; /* out of VEB slots! */ } veb = kzalloc(sizeof(*veb), GFP_KERNEL); if (!veb) { ret = -ENOMEM; goto err_alloc_veb; } veb->pf = pf; veb->idx = i; veb->enabled_tc = 1; pf->veb[i] = veb; ret = i; err_alloc_veb: mutex_unlock(&pf->switch_mutex); return ret; } /** * i40e_switch_branch_release - Delete a branch of the switch tree * @branch: where to start deleting * * This uses recursion to find the tips of the branch to be * removed, deleting until we get back to and can delete this VEB. **/ static void i40e_switch_branch_release(struct i40e_veb *branch) { struct i40e_pf *pf = branch->pf; u16 branch_seid = branch->seid; u16 veb_idx = branch->idx; int i; /* release any VEBs on this VEB - RECURSION */ for (i = 0; i < I40E_MAX_VEB; i++) { if (!pf->veb[i]) continue; if (pf->veb[i]->uplink_seid == branch->seid) i40e_switch_branch_release(pf->veb[i]); } /* Release the VSIs on this VEB, but not the owner VSI. * * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing * the VEB itself, so don't use (*branch) after this loop. */ for (i = 0; i < pf->num_alloc_vsi; i++) { if (!pf->vsi[i]) continue; if (pf->vsi[i]->uplink_seid == branch_seid && (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { i40e_vsi_release(pf->vsi[i]); } } /* There's one corner case where the VEB might not have been * removed, so double check it here and remove it if needed. * This case happens if the veb was created from the debugfs * commands and no VSIs were added to it. 
*/ if (pf->veb[veb_idx]) i40e_veb_release(pf->veb[veb_idx]); } /** * i40e_veb_clear - remove veb struct * @veb: the veb to remove **/ static void i40e_veb_clear(struct i40e_veb *veb) { if (!veb) return; if (veb->pf) { struct i40e_pf *pf = veb->pf; mutex_lock(&pf->switch_mutex); if (pf->veb[veb->idx] == veb) pf->veb[veb->idx] = NULL; mutex_unlock(&pf->switch_mutex); } kfree(veb); } /** * i40e_veb_release - Delete a VEB and free its resources * @veb: the VEB being removed **/ void i40e_veb_release(struct i40e_veb *veb) { struct i40e_vsi *vsi = NULL; struct i40e_pf *pf; int i, n = 0; pf = veb->pf; /* find the remaining VSI and check for extras */ for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { n++; vsi = pf->vsi[i]; } } if (n != 1) { dev_info(&pf->pdev->dev, "can't remove VEB %d with %d VSIs left\n", veb->seid, n); return; } /* move the remaining VSI to uplink veb */ vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; if (veb->uplink_seid) { vsi->uplink_seid = veb->uplink_seid; if (veb->uplink_seid == pf->mac_seid) vsi->veb_idx = I40E_NO_VEB; else vsi->veb_idx = veb->veb_idx; } else { /* floating VEB */ vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; } i40e_aq_delete_element(&pf->hw, veb->seid, NULL); i40e_veb_clear(veb); } /** * i40e_add_veb - create the VEB in the switch * @veb: the VEB to be instantiated * @vsi: the controlling VSI **/ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { struct i40e_pf *pf = veb->pf; bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); int ret; ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, veb->enabled_tc, false, &veb->seid, enable_stats, NULL); /* get a VEB from the hardware */ if (ret) { dev_info(&pf->pdev->dev, "couldn't add VEB, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EPERM; } /* get statistics counter */ ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, &veb->stats_idx, NULL, NULL, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't get VEB statistics idx, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EPERM; } ret = i40e_veb_get_bw_info(veb); if (ret) { dev_info(&pf->pdev->dev, "couldn't get VEB bw info, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); i40e_aq_delete_element(&pf->hw, veb->seid, NULL); return -ENOENT; } vsi->uplink_seid = veb->seid; vsi->veb_idx = veb->idx; vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; return 0; } /** * i40e_veb_setup - Set up a VEB * @pf: board private structure * @flags: VEB setup flags * @uplink_seid: the switch element to link to * @vsi_seid: the initial VSI seid * @enabled_tc: Enabled TC bit-map * * This allocates the sw VEB structure and links it into the switch * It is possible and legal for this to be a duplicate of an already * existing VEB. It is also possible for both uplink and vsi seids * to be zero, in order to create a floating VEB. * * Returns pointer to the successfully allocated VEB sw struct on * success, otherwise returns NULL on failure. 
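 *
 * The VEB is instantiated in hardware via i40e_add_veb(); when the initial
 * VSI is the PF LAN VSI, the new VEB is also recorded as pf->lan_veb.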
**/ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, u16 vsi_seid, u8 enabled_tc) { struct i40e_veb *veb, *uplink_veb = NULL; int vsi_idx, veb_idx; int ret; /* if one seid is 0, the other must be 0 to create a floating relay */ if ((uplink_seid == 0 || vsi_seid == 0) && (uplink_seid + vsi_seid != 0)) { dev_info(&pf->pdev->dev, "one, not both seid's are 0: uplink=%d vsi=%d\n", uplink_seid, vsi_seid); return NULL; } /* make sure there is such a vsi and uplink */ for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) break; if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) { dev_info(&pf->pdev->dev, "vsi seid %d not found\n", vsi_seid); return NULL; } if (uplink_seid && uplink_seid != pf->mac_seid) { for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { if (pf->veb[veb_idx] && pf->veb[veb_idx]->seid == uplink_seid) { uplink_veb = pf->veb[veb_idx]; break; } } if (!uplink_veb) { dev_info(&pf->pdev->dev, "uplink seid %d not found\n", uplink_seid); return NULL; } } /* get veb sw struct */ veb_idx = i40e_veb_mem_alloc(pf); if (veb_idx < 0) goto err_alloc; veb = pf->veb[veb_idx]; veb->flags = flags; veb->uplink_seid = uplink_seid; veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); /* create the VEB in the switch */ ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); if (ret) goto err_veb; if (vsi_idx == pf->lan_vsi) pf->lan_veb = veb->idx; return veb; err_veb: i40e_veb_clear(veb); err_alloc: return NULL; } /** * i40e_setup_pf_switch_element - set PF vars based on switch type * @pf: board private structure * @ele: element we are building info from * @num_reported: total number of elements * @printconfig: should we print the contents * * helper function to assist in extracting a few useful SEID values. **/ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, struct i40e_aqc_switch_config_element_resp *ele, u16 num_reported, bool printconfig) { u16 downlink_seid = le16_to_cpu(ele->downlink_seid); u16 uplink_seid = le16_to_cpu(ele->uplink_seid); u8 element_type = ele->element_type; u16 seid = le16_to_cpu(ele->seid); if (printconfig) dev_info(&pf->pdev->dev, "type=%d seid=%d uplink=%d downlink=%d\n", element_type, seid, uplink_seid, downlink_seid); switch (element_type) { case I40E_SWITCH_ELEMENT_TYPE_MAC: pf->mac_seid = seid; break; case I40E_SWITCH_ELEMENT_TYPE_VEB: /* Main VEB? 
*/ if (uplink_seid != pf->mac_seid) break; if (pf->lan_veb >= I40E_MAX_VEB) { int v; /* find existing or else empty VEB */ for (v = 0; v < I40E_MAX_VEB; v++) { if (pf->veb[v] && (pf->veb[v]->seid == seid)) { pf->lan_veb = v; break; } } if (pf->lan_veb >= I40E_MAX_VEB) { v = i40e_veb_mem_alloc(pf); if (v < 0) break; pf->lan_veb = v; } } if (pf->lan_veb >= I40E_MAX_VEB) break; pf->veb[pf->lan_veb]->seid = seid; pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; pf->veb[pf->lan_veb]->pf = pf; pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; break; case I40E_SWITCH_ELEMENT_TYPE_VSI: if (num_reported != 1) break; /* This is immediately after a reset so we can assume this is * the PF's VSI */ pf->mac_seid = uplink_seid; pf->pf_seid = downlink_seid; pf->main_vsi_seid = seid; if (printconfig) dev_info(&pf->pdev->dev, "pf_seid=%d main_vsi_seid=%d\n", pf->pf_seid, pf->main_vsi_seid); break; case I40E_SWITCH_ELEMENT_TYPE_PF: case I40E_SWITCH_ELEMENT_TYPE_VF: case I40E_SWITCH_ELEMENT_TYPE_EMP: case I40E_SWITCH_ELEMENT_TYPE_BMC: case I40E_SWITCH_ELEMENT_TYPE_PE: case I40E_SWITCH_ELEMENT_TYPE_PA: /* ignore these for now */ break; default: dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", element_type, seid); break; } } /** * i40e_fetch_switch_configuration - Get switch config from firmware * @pf: board private structure * @printconfig: should we print the contents * * Get the current switch configuration from the device and * extract a few useful SEID values. **/ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) { struct i40e_aqc_get_switch_config_resp *sw_config; u16 next_seid = 0; int ret = 0; u8 *aq_buf; int i; aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL); if (!aq_buf) return -ENOMEM; sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; do { u16 num_reported, num_total; ret = i40e_aq_get_switch_config(&pf->hw, sw_config, I40E_AQ_LARGE_BUF, &next_seid, NULL); if (ret) { dev_info(&pf->pdev->dev, "get switch config failed err %d aq_err %s\n", ret, i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); kfree(aq_buf); return -ENOENT; } num_reported = le16_to_cpu(sw_config->header.num_reported); num_total = le16_to_cpu(sw_config->header.num_total); if (printconfig) dev_info(&pf->pdev->dev, "header: %d reported %d total\n", num_reported, num_total); for (i = 0; i < num_reported; i++) { struct i40e_aqc_switch_config_element_resp *ele = &sw_config->element[i]; i40e_setup_pf_switch_element(pf, ele, num_reported, printconfig); } } while (next_seid != 0); kfree(aq_buf); return ret; } /** * i40e_setup_pf_switch - Setup the HW switch on startup or after reset * @pf: board private structure * @reinit: if the Main VSI needs to re-initialized. * @lock_acquired: indicates whether or not the lock has been acquired * * Returns 0 on success, negative value on failure **/ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired) { u16 flags = 0; int ret; /* find out what's out there already */ ret = i40e_fetch_switch_configuration(pf, false); if (ret) { dev_info(&pf->pdev->dev, "couldn't fetch switch config, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; } i40e_pf_reset_stats(pf); /* set the switch config bit for the whole device to * support limited promisc or true promisc * when user requests promisc. The default is limited * promisc. 
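	 * The switch configuration is only updated from PF 0, using
	 * i40e_aq_set_switch_config().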
*/ if ((pf->hw.pf_id == 0) && !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; pf->last_sw_conf_flags = flags; } if (pf->hw.pf_id == 0) { u16 valid_flags; valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0, NULL); if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { dev_info(&pf->pdev->dev, "couldn't set switch config bits, err %pe aq_err %s\n", ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* not a fatal problem, just keep going */ } pf->last_sw_conf_valid_flags = valid_flags; } /* first time setup */ if (pf->lan_vsi == I40E_NO_VSI || reinit) { struct i40e_vsi *vsi = NULL; u16 uplink_seid; /* Set up the PF VSI associated with the PF's main VSI * that is already in the HW switch */ if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) uplink_seid = pf->veb[pf->lan_veb]->seid; else uplink_seid = pf->mac_seid; if (pf->lan_vsi == I40E_NO_VSI) vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); else if (reinit) vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); if (!vsi) { dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); i40e_cloud_filter_exit(pf); i40e_fdir_teardown(pf); return -EAGAIN; } } else { /* force a reset of TC and queue layout configurations */ u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); } i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); i40e_fdir_sb_setup(pf); /* Setup static PF queue filter control settings */ ret = i40e_setup_pf_filter_control(pf); if (ret) { dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", ret); /* Failure here should not stop continuing other steps */ } /* enable RSS in the HW, even for only one queue, as the stack can use * the hash */ if ((pf->flags & I40E_FLAG_RSS_ENABLED)) i40e_pf_config_rss(pf); /* fill in link information and enable LSE reporting */ i40e_link_event(pf); /* Initialize user-specific link properties */ pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? true : false); i40e_ptp_init(pf); if (!lock_acquired) rtnl_lock(); /* repopulate tunnel port filters */ udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev); if (!lock_acquired) rtnl_unlock(); return ret; } /** * i40e_determine_queue_usage - Work out queue distribution * @pf: board private structure **/ static void i40e_determine_queue_usage(struct i40e_pf *pf) { int queues_left; int q_max; pf->num_lan_qps = 0; /* Find the max queues to be put into basic use. We'll always be * using TC0, whether or not DCB is running, and TC0 will get the * big RSS set. 
*/ queues_left = pf->hw.func_caps.num_tx_qp; if ((queues_left == 1) || !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { /* one qp for PF, no queues for anything else */ queues_left = 0; pf->alloc_rss_size = pf->num_lan_qps = 1; /* make sure all the fancies are disabled */ pf->flags &= ~(I40E_FLAG_RSS_ENABLED | I40E_FLAG_IWARP_ENABLED | I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED | I40E_FLAG_SRIOV_ENABLED | I40E_FLAG_VMDQ_ENABLED); pf->flags |= I40E_FLAG_FD_SB_INACTIVE; } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_CAPABLE))) { /* one qp for PF */ pf->alloc_rss_size = pf->num_lan_qps = 1; queues_left -= pf->num_lan_qps; pf->flags &= ~(I40E_FLAG_RSS_ENABLED | I40E_FLAG_IWARP_ENABLED | I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_ENABLED | I40E_FLAG_VMDQ_ENABLED); pf->flags |= I40E_FLAG_FD_SB_INACTIVE; } else { /* Not enough queues for all TCs */ if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && (queues_left < I40E_MAX_TRAFFIC_CLASS)) { pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); } /* limit lan qps to the smaller of qps, cpus or msix */ q_max = max_t(int, pf->rss_size_max, num_online_cpus()); q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp); q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors); pf->num_lan_qps = q_max; queues_left -= pf->num_lan_qps; } if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { if (queues_left > 1) { queues_left -= 1; /* save 1 queue for FD */ } else { pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; pf->flags |= I40E_FLAG_FD_SB_INACTIVE; dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n"); } } if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && pf->num_vf_qps && pf->num_req_vfs && queues_left) { pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left / pf->num_vf_qps)); queues_left -= (pf->num_req_vfs * pf->num_vf_qps); } if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, (queues_left / pf->num_vmdq_qps)); queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); } pf->queues_left = queues_left; dev_dbg(&pf->pdev->dev, "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n", pf->hw.func_caps.num_tx_qp, !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left); } /** * i40e_setup_pf_filter_control - Setup PF static filter control * @pf: PF to be setup * * i40e_setup_pf_filter_control sets up a PF's initial filter control * settings. If PE/FCoE are enabled then it will also set the per PF * based filter sizes required for them. It also enables Flow director, * ethertype and macvlan type filter settings for the pf. 
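 * Flow Director is enabled when either the sideband or ATR flag is set,
 * and the RSS hash LUT size is fixed at 128 entries.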
* * Returns 0 on success, negative on failure **/ static int i40e_setup_pf_filter_control(struct i40e_pf *pf) { struct i40e_filter_control_settings *settings = &pf->filter_settings; settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; /* Flow Director is enabled */ if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) settings->enable_fdir = true; /* Ethtype and MACVLAN filters enabled for PF */ settings->enable_ethtype = true; settings->enable_macvlan = true; if (i40e_set_filter_control(&pf->hw, settings)) return -ENOENT; return 0; } #define INFO_STRING_LEN 255 #define REMAIN(__x) (INFO_STRING_LEN - (__x)) static void i40e_print_features(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; char *buf; int i; buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL); if (!buf) return; i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id); #ifdef CONFIG_PCI_IOV i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); #endif i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d", pf->hw.func_caps.num_vsis, pf->vsi[pf->lan_vsi]->num_queue_pairs); if (pf->flags & I40E_FLAG_RSS_ENABLED) i += scnprintf(&buf[i], REMAIN(i), " RSS"); if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) i += scnprintf(&buf[i], REMAIN(i), " FD_ATR"); if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { i += scnprintf(&buf[i], REMAIN(i), " FD_SB"); i += scnprintf(&buf[i], REMAIN(i), " NTUPLE"); } if (pf->flags & I40E_FLAG_DCB_CAPABLE) i += scnprintf(&buf[i], REMAIN(i), " DCB"); i += scnprintf(&buf[i], REMAIN(i), " VxLAN"); i += scnprintf(&buf[i], REMAIN(i), " Geneve"); if (pf->flags & I40E_FLAG_PTP) i += scnprintf(&buf[i], REMAIN(i), " PTP"); if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) i += scnprintf(&buf[i], REMAIN(i), " VEB"); else i += scnprintf(&buf[i], REMAIN(i), " VEPA"); dev_info(&pf->pdev->dev, "%s\n", buf); kfree(buf); WARN_ON(i > INFO_STRING_LEN); } /** * i40e_get_platform_mac_addr - get platform-specific MAC address * @pdev: PCI device information struct * @pf: board private structure * * Look up the MAC address for the device. First we'll try * eth_platform_get_mac_address, which will check Open Firmware, or arch * specific fallback. Otherwise, we'll default to the stored value in * firmware. **/ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) { if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr)) i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr); } /** * i40e_set_fec_in_flags - helper function for setting FEC options in flags * @fec_cfg: FEC option to set in flags * @flags: ptr to flags in which we set FEC option **/ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags) { if (fec_cfg & I40E_AQ_SET_FEC_AUTO) *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC; if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) || (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) { *flags |= I40E_FLAG_RS_FEC; *flags &= ~I40E_FLAG_BASE_R_FEC; } if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) || (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) { *flags |= I40E_FLAG_BASE_R_FEC; *flags &= ~I40E_FLAG_RS_FEC; } if (fec_cfg == 0) *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC); } /** * i40e_check_recovery_mode - check if we are running transition firmware * @pf: board private structure * * Check registers indicating the firmware runs in recovery mode. Sets the * appropriate driver state. 
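 * The check reads the FWS1B field of the I40E_GL_FWSTS register.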
 *
 * Returns true if recovery mode was detected, false otherwise
 **/
static bool i40e_check_recovery_mode(struct i40e_pf *pf)
{
	u32 val = rd32(&pf->hw, I40E_GL_FWSTS);

	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
		dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
		dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
		set_bit(__I40E_RECOVERY_MODE, pf->state);

		return true;
	}
	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
		dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");

	return false;
}

/**
 * i40e_pf_loop_reset - perform reset in a loop
 * @pf: board private structure
 *
 * This function is useful when a NIC is about to enter recovery mode.
 * When a NIC's internal data structures are corrupted the firmware will
 * eventually enter recovery mode, but right after a POR that transition
 * can take about 7 minutes. Until then the NIC sits in an intermediate
 * state, and the only way for the driver to detect that state is to issue
 * a series of PF resets and check the return values. If a PF reset
 * succeeds, the firmware may still be about to enter recovery mode, so the
 * caller must check for recovery mode whenever this function returns
 * success. There is a small chance the firmware hangs in the intermediate
 * state forever; rather than waiting the full 7 minutes, this function
 * retries for 10 seconds and then gives up with an error.
 *
 * Return 0 on success, negative on failure.
 **/
static int i40e_pf_loop_reset(struct i40e_pf *pf)
{
	/* wait max 10 seconds for PF reset to succeed */
	const unsigned long time_end = jiffies + 10 * HZ;
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_pf_reset(hw);
	while (ret != 0 && time_before(jiffies, time_end)) {
		usleep_range(10000, 20000);
		ret = i40e_pf_reset(hw);
	}

	if (ret == 0)
		pf->pfr_count++;
	else
		dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);

	return ret;
}

/**
 * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
 * @pf: board private structure
 *
 * Check FW registers to determine if FW issued an unexpected EMP Reset.
 * Each unexpected EMP Reset increments a firmware counter; when the
 * counter reaches 10 the FW should enter recovery mode.
 *
 * Returns true if FW issued an unexpected EMP Reset
 **/
static bool i40e_check_fw_empr(struct i40e_pf *pf)
{
	const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
			   I40E_GL_FWSTS_FWS1B_MASK;

	return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
	       (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
}

/**
 * i40e_handle_resets - handle EMP resets and PF resets
 * @pf: board private structure
 *
 * Handle both EMP resets and PF resets and decide whether either of them
 * ran into trouble. If so, log a critical message.
 *
 * Return 0 if the NIC is healthy, or a negative value when there are
 * issues with the resets
 **/
static int i40e_handle_resets(struct i40e_pf *pf)
{
	const int pfr = i40e_pf_loop_reset(pf);
	const bool is_empr = i40e_check_fw_empr(pf);

	if (is_empr || pfr != 0)
		dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");

	return is_empr ?
-EIO : pfr; } /** * i40e_init_recovery_mode - initialize subsystems needed in recovery mode * @pf: board private structure * @hw: ptr to the hardware info * * This function does a minimal setup of all subsystems needed for running * recovery mode. * * Returns 0 on success, negative on failure **/ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw) { struct i40e_vsi *vsi; int err; int v_idx; pci_set_drvdata(pf->pdev, pf); pci_save_state(pf->pdev); /* set up periodic task facility */ timer_setup(&pf->service_timer, i40e_service_timer, 0); pf->service_timer_period = HZ; INIT_WORK(&pf->service_task, i40e_service_task); clear_bit(__I40E_SERVICE_SCHED, pf->state); err = i40e_init_interrupt_scheme(pf); if (err) goto err_switch_setup; /* The number of VSIs reported by the FW is the minimum guaranteed * to us; HW supports far more and we share the remaining pool with * the other PFs. We allocate space for more than the guarantee with * the understanding that we might not get them all later. */ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; else pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */ pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), GFP_KERNEL); if (!pf->vsi) { err = -ENOMEM; goto err_switch_setup; } /* We allocate one VSI which is needed as absolute minimum * in order to register the netdev */ v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN); if (v_idx < 0) { err = v_idx; goto err_switch_setup; } pf->lan_vsi = v_idx; vsi = pf->vsi[v_idx]; if (!vsi) { err = -EFAULT; goto err_switch_setup; } vsi->alloc_queue_pairs = 1; err = i40e_config_netdev(vsi); if (err) goto err_switch_setup; err = register_netdev(vsi->netdev); if (err) goto err_switch_setup; vsi->netdev_registered = true; i40e_dbg_pf_init(pf); err = i40e_setup_misc_vector_for_recovery_mode(pf); if (err) goto err_switch_setup; /* tell the firmware that we're starting */ i40e_send_version(pf); /* since everything's happy, start the service_task timer */ mod_timer(&pf->service_timer, round_jiffies(jiffies + pf->service_timer_period)); return 0; err_switch_setup: i40e_reset_interrupt_capability(pf); timer_shutdown_sync(&pf->service_timer); i40e_shutdown_adminq(hw); iounmap(hw->hw_addr); pci_release_mem_regions(pf->pdev); pci_disable_device(pf->pdev); kfree(pf); return err; } /** * i40e_set_subsystem_device_id - set subsystem device id * @hw: pointer to the hardware info * * Set PCI subsystem device id either from a pci_dev structure or * a specific FW register. **/ static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw) { struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev; hw->subsystem_device_id = pdev->subsystem_device ? pdev->subsystem_device : (ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX); } /** * i40e_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in i40e_pci_tbl * * i40e_probe initializes a PF identified by a pci_dev structure. * The OS initialization, configuring of the PF private structure, * and a hardware reset occur. 
* * Returns 0 on success, negative on failure **/ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct i40e_aq_get_phy_abilities_resp abilities; #ifdef CONFIG_I40E_DCB enum i40e_get_fw_lldp_status_resp lldp_status; #endif /* CONFIG_I40E_DCB */ struct i40e_pf *pf; struct i40e_hw *hw; static u16 pfs_found; u16 wol_nvm_bits; u16 link_status; #ifdef CONFIG_I40E_DCB int status; #endif /* CONFIG_I40E_DCB */ int err; u32 val; u32 i; err = pci_enable_device_mem(pdev); if (err) return err; /* set up for high or low dma */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); goto err_dma; } /* set up pci connections */ err = pci_request_mem_regions(pdev, i40e_driver_name); if (err) { dev_info(&pdev->dev, "pci_request_selected_regions failed %d\n", err); goto err_pci_reg; } pci_set_master(pdev); /* Now that we have a PCI connection, we need to do the * low level device setup. This is primarily setting up * the Admin Queue structures and then querying for the * device's current profile information. */ pf = kzalloc(sizeof(*pf), GFP_KERNEL); if (!pf) { err = -ENOMEM; goto err_pf_alloc; } pf->next_vsi = 0; pf->pdev = pdev; set_bit(__I40E_DOWN, pf->state); hw = &pf->hw; hw->back = pf; pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), I40E_MAX_CSR_SPACE); /* We believe that the highest register to read is * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size * is not less than that before mapping to prevent a * kernel panic. */ if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) { dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n", pf->ioremap_len); err = -ENOMEM; goto err_ioremap; } hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len); if (!hw->hw_addr) { err = -EIO; dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", (unsigned int)pci_resource_start(pdev, 0), pf->ioremap_len, err); goto err_ioremap; } hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); hw->subsystem_vendor_id = pdev->subsystem_vendor; i40e_set_subsystem_device_id(hw); hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); hw->bus.bus_id = pdev->bus->number; pf->instance = pfs_found; /* Select something other than the 802.1ad ethertype for the * switch to use internally and drop on ingress. 
*/ hw->switch_tag = 0xffff; hw->first_tag = ETH_P_8021AD; hw->second_tag = ETH_P_8021Q; INIT_LIST_HEAD(&pf->l3_flex_pit_list); INIT_LIST_HEAD(&pf->l4_flex_pit_list); INIT_LIST_HEAD(&pf->ddp_old_prof); /* set up the locks for the AQ, do this only once in probe * and destroy them only once in remove */ mutex_init(&hw->aq.asq_mutex); mutex_init(&hw->aq.arq_mutex); pf->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK); if (debug < -1) pf->hw.debug_mask = debug; /* do a special CORER for clearing PXE mode once at init */ if (hw->revision_id == 0 && (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); i40e_flush(hw); msleep(200); pf->corer_count++; i40e_clear_pxe_mode(hw); } /* Reset here to make sure all is clean and to define PF 'n' */ i40e_clear_hw(hw); err = i40e_set_mac_type(hw); if (err) { dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", err); goto err_pf_reset; } err = i40e_handle_resets(pf); if (err) goto err_pf_reset; i40e_check_recovery_mode(pf); if (is_kdump_kernel()) { hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN; hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN; } else { hw->aq.num_arq_entries = I40E_AQ_LEN; hw->aq.num_asq_entries = I40E_AQ_LEN; } hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); err = i40e_init_shared_code(hw); if (err) { dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", err); goto err_pf_reset; } /* set up a default setting for link flow control */ pf->hw.fc.requested_mode = I40E_FC_NONE; err = i40e_init_adminq(hw); if (err) { if (err == -EIO) dev_info(&pdev->dev, "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver, I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw)); else dev_info(&pdev->dev, "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n"); goto err_pf_reset; } i40e_get_oem_version(hw); /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */ dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n", hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, hw->aq.api_maj_ver, hw->aq.api_min_ver, i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id, hw->subsystem_vendor_id, hw->subsystem_device_id); if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) dev_dbg(&pdev->dev, "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver, I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw)); else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) dev_info(&pdev->dev, "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver, I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw)); i40e_verify_eeprom(pf); /* Rev 0 hardware was never productized */ if (hw->revision_id < 1) dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. 
If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); i40e_clear_pxe_mode(hw); err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities); if (err) goto err_adminq_setup; err = i40e_sw_init(pf); if (err) { dev_info(&pdev->dev, "sw_init failed: %d\n", err); goto err_sw_init; } if (test_bit(__I40E_RECOVERY_MODE, pf->state)) return i40e_init_recovery_mode(pf, hw); err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, 0, 0); if (err) { dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); goto err_init_lan_hmc; } err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (err) { dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); err = -ENOENT; goto err_configure_lan_hmc; } /* Disable LLDP for NICs that have firmware versions lower than v4.3. * Ignore error return codes because if it was already disabled via * hardware settings this will fail */ if (pf->hw_features & I40E_HW_STOP_FW_LLDP) { dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); i40e_aq_stop_lldp(hw, true, false, NULL); } /* allow a platform config to override the HW addr */ i40e_get_platform_mac_addr(pdev, pf); if (!is_valid_ether_addr(hw->mac.addr)) { dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); err = -EIO; goto err_mac_addr; } dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); i40e_get_port_mac_addr(hw, hw->mac.port_addr); if (is_valid_ether_addr(hw->mac.port_addr)) pf->hw_features |= I40E_HW_PORT_ID_VALID; i40e_ptp_alloc_pins(pf); pci_set_drvdata(pdev, pf); pci_save_state(pdev); #ifdef CONFIG_I40E_DCB status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status); (!status && lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED) ? (pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP) : (pf->flags |= I40E_FLAG_DISABLE_FW_LLDP); dev_info(&pdev->dev, (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ? "FW LLDP is disabled\n" : "FW LLDP is enabled\n"); /* Enable FW to write default DCB config on link-up */ i40e_aq_set_dcb_parameters(hw, true, NULL); err = i40e_init_pf_dcb(pf); if (err) { dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); /* Continue without DCB enabled */ } #endif /* CONFIG_I40E_DCB */ /* set up periodic task facility */ timer_setup(&pf->service_timer, i40e_service_timer, 0); pf->service_timer_period = HZ; INIT_WORK(&pf->service_task, i40e_service_task); clear_bit(__I40E_SERVICE_SCHED, pf->state); /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1) pf->wol_en = false; else pf->wol_en = true; device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); /* set up the main switch operations */ i40e_determine_queue_usage(pf); err = i40e_init_interrupt_scheme(pf); if (err) goto err_switch_setup; /* Reduce Tx and Rx pairs for kdump * When MSI-X is enabled, it's not allowed to use more TC queue * pairs than MSI-X vectors (pf->num_lan_msix) exist. Thus * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1. 
*/ if (is_kdump_kernel()) pf->num_lan_msix = 1; pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port; pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port; pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared; pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS; pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN | UDP_TUNNEL_TYPE_GENEVE; /* The number of VSIs reported by the FW is the minimum guaranteed * to us; HW supports far more and we share the remaining pool with * the other PFs. We allocate space for more than the guarantee with * the understanding that we might not get them all later. */ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; else pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { dev_warn(&pf->pdev->dev, "limiting the VSI count due to UDP tunnel limitation %d > %d\n", pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; } /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */ pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), GFP_KERNEL); if (!pf->vsi) { err = -ENOMEM; goto err_switch_setup; } #ifdef CONFIG_PCI_IOV /* prep for VF support */ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) && !test_bit(__I40E_BAD_EEPROM, pf->state)) { if (pci_num_vf(pdev)) pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; } #endif err = i40e_setup_pf_switch(pf, false, false); if (err) { dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); goto err_vsis; } INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); /* if FDIR VSI was set up, start it now */ for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { i40e_vsi_open(pf->vsi[i]); break; } } /* The driver only wants link up/down and module qualification * reports from firmware. Note the negative logic. */ err = i40e_aq_set_phy_int_mask(&pf->hw, ~(I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MEDIA_NA | I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (err) dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n", ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Reconfigure hardware for allowing smaller MSS in the case * of TSO, so that we avoid the MDD being fired and causing * a reset in the case of small MSS+TSO. */ val = rd32(hw, I40E_REG_MSS); if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { val &= ~I40E_REG_MSS_MIN_MASK; val |= I40E_64BYTE_MSS; wr32(hw, I40E_REG_MSS, val); } if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { msleep(75); err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (err) dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n", ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } /* The main driver is (mostly) up and happy. We need to set this state * before setting up the misc vector or we get a race and the vector * ends up disabled forever. */ clear_bit(__I40E_DOWN, pf->state); /* In case of MSIX we are going to setup the misc vector right here * to handle admin queue events etc. In case of legacy and MSI * the misc functionality and queue processing is combined in * the same vector and that gets setup at open. 
*/ if (pf->flags & I40E_FLAG_MSIX_ENABLED) { err = i40e_setup_misc_vector(pf); if (err) { dev_info(&pdev->dev, "setup of misc vector failed: %d\n", err); i40e_cloud_filter_exit(pf); i40e_fdir_teardown(pf); goto err_vsis; } } #ifdef CONFIG_PCI_IOV /* prep for VF support */ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) && !test_bit(__I40E_BAD_EEPROM, pf->state)) { /* disable link interrupts for VFs */ val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val); i40e_flush(hw); if (pci_num_vf(pdev)) { dev_info(&pdev->dev, "Active VFs found, allocating resources.\n"); err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); if (err) dev_info(&pdev->dev, "Error %d allocating resources for existing VFs\n", err); } } #endif /* CONFIG_PCI_IOV */ if (pf->flags & I40E_FLAG_IWARP_ENABLED) { pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile, pf->num_iwarp_msix, I40E_IWARP_IRQ_PILE_ID); if (pf->iwarp_base_vector < 0) { dev_info(&pdev->dev, "failed to get tracking for %d vectors for IWARP err=%d\n", pf->num_iwarp_msix, pf->iwarp_base_vector); pf->flags &= ~I40E_FLAG_IWARP_ENABLED; } } i40e_dbg_pf_init(pf); /* tell the firmware that we're starting */ i40e_send_version(pf); /* since everything's happy, start the service_task timer */ mod_timer(&pf->service_timer, round_jiffies(jiffies + pf->service_timer_period)); /* add this PF to client device list and launch a client service task */ if (pf->flags & I40E_FLAG_IWARP_ENABLED) { err = i40e_lan_add_device(pf); if (err) dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", err); } #define PCI_SPEED_SIZE 8 #define PCI_WIDTH_SIZE 8 /* Devices on the IOSF bus do not have this information * and will report PCI Gen 1 x 1 by default so don't bother * checking them. 
*/ if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) { char speed[PCI_SPEED_SIZE] = "Unknown"; char width[PCI_WIDTH_SIZE] = "Unknown"; /* Get the negotiated link width and speed from PCI config * space */ pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status); i40e_set_pci_config_data(hw, link_status); switch (hw->bus.speed) { case i40e_bus_speed_8000: strscpy(speed, "8.0", PCI_SPEED_SIZE); break; case i40e_bus_speed_5000: strscpy(speed, "5.0", PCI_SPEED_SIZE); break; case i40e_bus_speed_2500: strscpy(speed, "2.5", PCI_SPEED_SIZE); break; default: break; } switch (hw->bus.width) { case i40e_bus_width_pcie_x8: strscpy(width, "8", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x4: strscpy(width, "4", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x2: strscpy(width, "2", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x1: strscpy(width, "1", PCI_WIDTH_SIZE); break; default: break; } dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n", speed, width); if (hw->bus.width < i40e_bus_width_pcie_x8 || hw->bus.speed < i40e_bus_speed_8000) { dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); } } /* get the requested speeds from the fw */ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); if (err) dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n", ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); pf->hw.phy.link_info.requested_speeds = abilities.link_speed; /* set the FEC config due to the board capabilities */ i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags); /* get the supported phy types from the fw */ err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL); if (err) dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n", ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* make sure the MFS hasn't been set lower than the default */ #define MAX_FRAME_SIZE_DEFAULT 0x2600 val = (rd32(&pf->hw, I40E_PRTGL_SAH) & I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT; if (val < MAX_FRAME_SIZE_DEFAULT) dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n", i, val); /* Add a filter to drop all Flow control frames from any VSI from being * transmitted. By doing so we stop a malicious VF from sending out * PAUSE or PFC frames and potentially controlling traffic for other * PF/VF VSIs. * The FW can still send Flow control frames if enabled. 
*/ i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, pf->main_vsi_seid); if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) || (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS; if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722) pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER; /* print a string summarizing features */ i40e_print_features(pf); return 0; /* Unwind what we've done if something failed in the setup */ err_vsis: set_bit(__I40E_DOWN, pf->state); i40e_clear_interrupt_scheme(pf); kfree(pf->vsi); err_switch_setup: i40e_reset_interrupt_capability(pf); timer_shutdown_sync(&pf->service_timer); err_mac_addr: err_configure_lan_hmc: (void)i40e_shutdown_lan_hmc(hw); err_init_lan_hmc: kfree(pf->qp_pile); err_sw_init: err_adminq_setup: err_pf_reset: iounmap(hw->hw_addr); err_ioremap: kfree(pf); err_pf_alloc: pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); return err; } /** * i40e_remove - Device removal routine * @pdev: PCI device information struct * * i40e_remove is called by the PCI subsystem to alert the driver * that is should release a PCI device. This could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. **/ static void i40e_remove(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; int ret_code; int i; i40e_dbg_pf_exit(pf); i40e_ptp_stop(pf); /* Disable RSS in hw */ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); /* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE * flags, once they are set, i40e_rebuild should not be called as * i40e_prep_for_reset always returns early. */ while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) usleep_range(1000, 2000); set_bit(__I40E_IN_REMOVE, pf->state); if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { set_bit(__I40E_VF_RESETS_DISABLED, pf->state); i40e_free_vfs(pf); pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; } /* no more scheduling of any task */ set_bit(__I40E_SUSPENDED, pf->state); set_bit(__I40E_DOWN, pf->state); if (pf->service_timer.function) timer_shutdown_sync(&pf->service_timer); if (pf->service_task.func) cancel_work_sync(&pf->service_task); if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { struct i40e_vsi *vsi = pf->vsi[0]; /* We know that we have allocated only one vsi for this PF, * it was just for registering netdevice, so the interface * could be visible in the 'ifconfig' output */ unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); goto unmap; } /* Client close must be called explicitly here because the timer * has been stopped. */ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); i40e_fdir_teardown(pf); /* If there is a switch structure or any orphans, remove them. * This will leave only the PF's VSI remaining. */ for (i = 0; i < I40E_MAX_VEB; i++) { if (!pf->veb[i]) continue; if (pf->veb[i]->uplink_seid == pf->mac_seid || pf->veb[i]->uplink_seid == 0) i40e_switch_branch_release(pf->veb[i]); } /* Now we can shutdown the PF's VSI, just before we kill * adminq and hmc. 
*/ if (pf->vsi[pf->lan_vsi]) i40e_vsi_release(pf->vsi[pf->lan_vsi]); i40e_cloud_filter_exit(pf); /* remove attached clients */ if (pf->flags & I40E_FLAG_IWARP_ENABLED) { ret_code = i40e_lan_del_device(pf); if (ret_code) dev_warn(&pdev->dev, "Failed to delete client device: %d\n", ret_code); } /* shutdown and destroy the HMC */ if (hw->hmc.hmc_obj) { ret_code = i40e_shutdown_lan_hmc(hw); if (ret_code) dev_warn(&pdev->dev, "Failed to destroy the HMC resources: %d\n", ret_code); } unmap: /* Free MSI/legacy interrupt 0 when in recovery mode. */ if (test_bit(__I40E_RECOVERY_MODE, pf->state) && !(pf->flags & I40E_FLAG_MSIX_ENABLED)) free_irq(pf->pdev->irq, pf); /* shutdown the adminq */ i40e_shutdown_adminq(hw); /* destroy the locks only once, here */ mutex_destroy(&hw->aq.arq_mutex); mutex_destroy(&hw->aq.asq_mutex); /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ rtnl_lock(); i40e_clear_interrupt_scheme(pf); for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i]) { if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) i40e_vsi_clear_rings(pf->vsi[i]); i40e_vsi_clear(pf->vsi[i]); pf->vsi[i] = NULL; } } rtnl_unlock(); for (i = 0; i < I40E_MAX_VEB; i++) { kfree(pf->veb[i]); pf->veb[i] = NULL; } kfree(pf->qp_pile); kfree(pf->vsi); iounmap(hw->hw_addr); kfree(pf); pci_release_mem_regions(pdev); pci_disable_device(pdev); } /** * i40e_pci_error_detected - warning that something funky happened in PCI land * @pdev: PCI device information struct * @error: the type of PCI error * * Called to warn that something happened and the error handling steps * are in progress. Allows the driver to quiesce things, be ready for * remediation. **/ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t error) { struct i40e_pf *pf = pci_get_drvdata(pdev); dev_info(&pdev->dev, "%s: error %d\n", __func__, error); if (!pf) { dev_info(&pdev->dev, "Cannot recover - error happened during device probe\n"); return PCI_ERS_RESULT_DISCONNECT; } /* shutdown all operations */ if (!test_bit(__I40E_SUSPENDED, pf->state)) i40e_prep_for_reset(pf); /* Request a slot reset */ return PCI_ERS_RESULT_NEED_RESET; } /** * i40e_pci_error_slot_reset - a PCI slot reset just happened * @pdev: PCI device information struct * * Called to find if the driver can work with the device now that * the pci slot has been reset. If a basic connection seems good * (registers are readable and have sane content) then return a * happy little PCI_ERS_RESULT_xxx. 
**/ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); pci_ers_result_t result; u32 reg; dev_dbg(&pdev->dev, "%s\n", __func__); if (pci_enable_device_mem(pdev)) { dev_info(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); pci_wake_from_d3(pdev, false); reg = rd32(&pf->hw, I40E_GLGEN_RTRIG); if (reg == 0) result = PCI_ERS_RESULT_RECOVERED; else result = PCI_ERS_RESULT_DISCONNECT; } return result; } /** * i40e_pci_error_reset_prepare - prepare device driver for pci reset * @pdev: PCI device information struct */ static void i40e_pci_error_reset_prepare(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); i40e_prep_for_reset(pf); } /** * i40e_pci_error_reset_done - pci reset done, device driver reset can begin * @pdev: PCI device information struct */ static void i40e_pci_error_reset_done(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); if (test_bit(__I40E_IN_REMOVE, pf->state)) return; i40e_reset_and_rebuild(pf, false, false); } /** * i40e_pci_error_resume - restart operations after PCI error recovery * @pdev: PCI device information struct * * Called to allow the driver to bring things back up after PCI error * and/or reset recovery has finished. **/ static void i40e_pci_error_resume(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); dev_dbg(&pdev->dev, "%s\n", __func__); if (test_bit(__I40E_SUSPENDED, pf->state)) return; i40e_handle_reset_warning(pf, false); } /** * i40e_enable_mc_magic_wake - enable multicast magic packet wake up * using the mac_address_write admin q function * @pf: pointer to i40e_pf struct **/ static void i40e_enable_mc_magic_wake(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u8 mac_addr[6]; u16 flags = 0; int ret; /* Get current MAC address in case it's an LAA */ if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) { ether_addr_copy(mac_addr, pf->vsi[pf->lan_vsi]->netdev->dev_addr); } else { dev_err(&pf->pdev->dev, "Failed to retrieve MAC address; using default\n"); ether_addr_copy(mac_addr, hw->mac.addr); } /* The FW expects the mac address write cmd to first be called with * one of these flags before calling it again with the multicast * enable flags. */ flags = I40E_AQC_WRITE_TYPE_LAA_WOL; if (hw->func_caps.flex10_enable && hw->partition_id != 1) flags = I40E_AQC_WRITE_TYPE_LAA_ONLY; ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL); if (ret) { dev_err(&pf->pdev->dev, "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up"); return; } flags = I40E_AQC_MC_MAG_EN | I40E_AQC_WOL_PRESERVE_ON_PFR | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG; ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL); if (ret) dev_err(&pf->pdev->dev, "Failed to enable Multicast Magic Packet wake up\n"); } /** * i40e_shutdown - PCI callback for shutting down * @pdev: PCI device information struct **/ static void i40e_shutdown(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; set_bit(__I40E_SUSPENDED, pf->state); set_bit(__I40E_DOWN, pf->state); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); i40e_cloud_filter_exit(pf); i40e_fdir_teardown(pf); /* Client close must be called explicitly here because the timer * has been stopped. 
*/ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) i40e_enable_mc_magic_wake(pf); i40e_prep_for_reset(pf); wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); /* Free MSI/legacy interrupt 0 when in recovery mode. */ if (test_bit(__I40E_RECOVERY_MODE, pf->state) && !(pf->flags & I40E_FLAG_MSIX_ENABLED)) free_irq(pf->pdev->irq, pf); /* Since we're going to destroy queues during the * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this * whole section */ rtnl_lock(); i40e_clear_interrupt_scheme(pf); rtnl_unlock(); if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, pf->wol_en); pci_set_power_state(pdev, PCI_D3hot); } } /** * i40e_suspend - PM callback for moving to D3 * @dev: generic device information structure **/ static int __maybe_unused i40e_suspend(struct device *dev) { struct i40e_pf *pf = dev_get_drvdata(dev); struct i40e_hw *hw = &pf->hw; /* If we're already suspended, then there is nothing to do */ if (test_and_set_bit(__I40E_SUSPENDED, pf->state)) return 0; set_bit(__I40E_DOWN, pf->state); /* Ensure service task will not be running */ del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); /* Client close must be called explicitly here because the timer * has been stopped. */ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) i40e_enable_mc_magic_wake(pf); /* Since we're going to destroy queues during the * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this * whole section */ rtnl_lock(); i40e_prep_for_reset(pf); wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); /* Clear the interrupt scheme and release our IRQs so that the system * can safely hibernate even when there are a large number of CPUs. * Otherwise hibernation might fail when mapping all the vectors back * to CPU0. */ i40e_clear_interrupt_scheme(pf); rtnl_unlock(); return 0; } /** * i40e_resume - PM callback for waking up from D3 * @dev: generic device information structure **/ static int __maybe_unused i40e_resume(struct device *dev) { struct i40e_pf *pf = dev_get_drvdata(dev); int err; /* If we're not suspended, then there is nothing to do */ if (!test_bit(__I40E_SUSPENDED, pf->state)) return 0; /* We need to hold the RTNL lock prior to restoring interrupt schemes, * since we're going to be restoring queues */ rtnl_lock(); /* We cleared the interrupt scheme when we suspended, so we need to * restore it now to resume device functionality. 
*/ err = i40e_restore_interrupt_scheme(pf); if (err) { dev_err(dev, "Cannot restore interrupt scheme: %d\n", err); } clear_bit(__I40E_DOWN, pf->state); i40e_reset_and_rebuild(pf, false, true); rtnl_unlock(); /* Clear suspended state last after everything is recovered */ clear_bit(__I40E_SUSPENDED, pf->state); /* Restart the service task */ mod_timer(&pf->service_timer, round_jiffies(jiffies + pf->service_timer_period)); return 0; } static const struct pci_error_handlers i40e_err_handler = { .error_detected = i40e_pci_error_detected, .slot_reset = i40e_pci_error_slot_reset, .reset_prepare = i40e_pci_error_reset_prepare, .reset_done = i40e_pci_error_reset_done, .resume = i40e_pci_error_resume, }; static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume); static struct pci_driver i40e_driver = { .name = i40e_driver_name, .id_table = i40e_pci_tbl, .probe = i40e_probe, .remove = i40e_remove, .driver = { .pm = &i40e_pm_ops, }, .shutdown = i40e_shutdown, .err_handler = &i40e_err_handler, .sriov_configure = i40e_pci_sriov_configure, }; /** * i40e_init_module - Driver registration routine * * i40e_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. **/ static int __init i40e_init_module(void) { int err; pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); /* There is no need to throttle the number of active tasks because * each device limits its own task using a state bit for scheduling * the service task, and the device tasks do not interfere with each * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM * since we need to be able to guarantee forward progress even under * memory pressure. */ i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name); if (!i40e_wq) { pr_err("%s: Failed to create workqueue\n", i40e_driver_name); return -ENOMEM; } i40e_dbg_init(); err = pci_register_driver(&i40e_driver); if (err) { destroy_workqueue(i40e_wq); i40e_dbg_exit(); return err; } return 0; } module_init(i40e_init_module); /** * i40e_exit_module - Driver exit cleanup routine * * i40e_exit_module is called just before the driver is removed * from memory. **/ static void __exit i40e_exit_module(void) { pci_unregister_driver(&i40e_driver); destroy_workqueue(i40e_wq); ida_destroy(&i40e_client_ida); i40e_dbg_exit(); } module_exit(i40e_exit_module);
linux-master
drivers/net/ethernet/intel/i40e/i40e_main.c
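An illustrative aside, separate from the i40e_main.c sources above: the kernel-doc for i40e_pf_loop_reset() explains why the driver retries the PF reset for a bounded time (about 10 seconds) instead of waiting the several minutes the firmware may need to enter recovery mode. The small userspace sketch below restates that bounded-deadline retry pattern on its own; try_reset(), reset_with_deadline() and the timing values are stand-ins invented for this example, not driver APIs.

/* Userspace sketch of the bounded retry loop in i40e_pf_loop_reset():
 * keep retrying an operation until it either succeeds or a deadline
 * expires, then report the last result.
 */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Hypothetical stand-in for i40e_pf_reset(): fails a few times, then works. */
static int try_reset(void)
{
	static int attempts;

	return ++attempts < 5 ? -1 : 0;
}

static int reset_with_deadline(unsigned int max_seconds)
{
	time_t deadline = time(NULL) + max_seconds;
	int ret = try_reset();

	while (ret != 0 && time(NULL) < deadline) {
		usleep(10000);	/* mirrors the 10-20 ms pause between attempts */
		ret = try_reset();
	}
	return ret;
}

int main(void)
{
	printf("reset %s\n", reset_with_deadline(10) == 0 ? "succeeded" : "failed");
	return 0;
}

As in the driver, a zero return only means the reset command itself succeeded; the caller still has to check separately whether the device ended up in recovery mode.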
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e.h" #include <linux/firmware.h> /** * i40e_ddp_profiles_eq - checks if DDP profiles are the equivalent * @a: new profile info * @b: old profile info * * checks if DDP profiles are the equivalent. * Returns true if profiles are the same. **/ static bool i40e_ddp_profiles_eq(struct i40e_profile_info *a, struct i40e_profile_info *b) { return a->track_id == b->track_id && !memcmp(&a->version, &b->version, sizeof(a->version)) && !memcmp(&a->name, &b->name, I40E_DDP_NAME_SIZE); } /** * i40e_ddp_does_profile_exist - checks if DDP profile loaded already * @hw: HW data structure * @pinfo: DDP profile information structure * * checks if DDP profile loaded already. * Returns >0 if the profile exists. * Returns 0 if the profile is absent. * Returns <0 if error. **/ static int i40e_ddp_does_profile_exist(struct i40e_hw *hw, struct i40e_profile_info *pinfo) { struct i40e_ddp_profile_list *profile_list; u8 buff[I40E_PROFILE_LIST_SIZE]; int status; int i; status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0, NULL); if (status) return -1; profile_list = (struct i40e_ddp_profile_list *)buff; for (i = 0; i < profile_list->p_count; i++) { if (i40e_ddp_profiles_eq(pinfo, &profile_list->p_info[i])) return 1; } return 0; } /** * i40e_ddp_profiles_overlap - checks if DDP profiles overlap. * @new: new profile info * @old: old profile info * * checks if DDP profiles overlap. * Returns true if profiles are overlap. **/ static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new, struct i40e_profile_info *old) { unsigned int group_id_old = (u8)((old->track_id & 0x00FF0000) >> 16); unsigned int group_id_new = (u8)((new->track_id & 0x00FF0000) >> 16); /* 0x00 group must be only the first */ if (group_id_new == 0) return true; /* 0xFF group is compatible with anything else */ if (group_id_new == 0xFF || group_id_old == 0xFF) return false; /* otherwise only profiles from the same group are compatible*/ return group_id_old != group_id_new; } /** * i40e_ddp_does_profile_overlap - checks if DDP overlaps with existing one. * @hw: HW data structure * @pinfo: DDP profile information structure * * checks if DDP profile overlaps with existing one. * Returns >0 if the profile overlaps. * Returns 0 if the profile is ok. * Returns <0 if error. **/ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw, struct i40e_profile_info *pinfo) { struct i40e_ddp_profile_list *profile_list; u8 buff[I40E_PROFILE_LIST_SIZE]; int status; int i; status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0, NULL); if (status) return -EIO; profile_list = (struct i40e_ddp_profile_list *)buff; for (i = 0; i < profile_list->p_count; i++) { if (i40e_ddp_profiles_overlap(pinfo, &profile_list->p_info[i])) return 1; } return 0; } /** * i40e_add_pinfo * @hw: pointer to the hardware structure * @profile: pointer to the profile segment of the package * @profile_info_sec: buffer for information section * @track_id: package tracking id * * Register a profile to the list of loaded profiles. 
*/ static int i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile, u8 *profile_info_sec, u32 track_id) { struct i40e_profile_section_header *sec; struct i40e_profile_info *pinfo; u32 offset = 0, info = 0; int status; sec = (struct i40e_profile_section_header *)profile_info_sec; sec->tbl_size = 1; sec->data_end = sizeof(struct i40e_profile_section_header) + sizeof(struct i40e_profile_info); sec->section.type = SECTION_TYPE_INFO; sec->section.offset = sizeof(struct i40e_profile_section_header); sec->section.size = sizeof(struct i40e_profile_info); pinfo = (struct i40e_profile_info *)(profile_info_sec + sec->section.offset); pinfo->track_id = track_id; pinfo->version = profile->version; pinfo->op = I40E_DDP_ADD_TRACKID; /* Clear reserved field */ memset(pinfo->reserved, 0, sizeof(pinfo->reserved)); memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, track_id, &offset, &info, NULL); return status; } /** * i40e_del_pinfo - delete DDP profile info from NIC * @hw: HW data structure * @profile: DDP profile segment to be deleted * @profile_info_sec: DDP profile section header * @track_id: track ID of the profile for deletion * * Removes DDP profile from the NIC. **/ static int i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile, u8 *profile_info_sec, u32 track_id) { struct i40e_profile_section_header *sec; struct i40e_profile_info *pinfo; u32 offset = 0, info = 0; int status; sec = (struct i40e_profile_section_header *)profile_info_sec; sec->tbl_size = 1; sec->data_end = sizeof(struct i40e_profile_section_header) + sizeof(struct i40e_profile_info); sec->section.type = SECTION_TYPE_INFO; sec->section.offset = sizeof(struct i40e_profile_section_header); sec->section.size = sizeof(struct i40e_profile_info); pinfo = (struct i40e_profile_info *)(profile_info_sec + sec->section.offset); pinfo->track_id = track_id; pinfo->version = profile->version; pinfo->op = I40E_DDP_REMOVE_TRACKID; /* Clear reserved field */ memset(pinfo->reserved, 0, sizeof(pinfo->reserved)); memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, track_id, &offset, &info, NULL); return status; } /** * i40e_ddp_is_pkg_hdr_valid - performs basic pkg header integrity checks * @netdev: net device structure (for logging purposes) * @pkg_hdr: pointer to package header * @size_huge: size of the whole DDP profile package in size_t * * Checks correctness of pkg header: Version, size too big/small, and * all segment offsets alignment and boundaries. This function lets * reject non DDP profile file to be loaded by administrator mistake. 
**/ static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev, struct i40e_package_header *pkg_hdr, size_t size_huge) { u32 size = 0xFFFFFFFFU & size_huge; u32 pkg_hdr_size; u32 segment; if (!pkg_hdr) return false; if (pkg_hdr->version.major > 0) { struct i40e_ddp_version ver = pkg_hdr->version; netdev_err(netdev, "Unsupported DDP profile version %u.%u.%u.%u", ver.major, ver.minor, ver.update, ver.draft); return false; } if (size_huge > size) { netdev_err(netdev, "Invalid DDP profile - size is bigger than 4G"); return false; } if (size < (sizeof(struct i40e_package_header) + sizeof(u32) + sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) { netdev_err(netdev, "Invalid DDP profile - size is too small."); return false; } pkg_hdr_size = sizeof(u32) * (pkg_hdr->segment_count + 2U); if (size < pkg_hdr_size) { netdev_err(netdev, "Invalid DDP profile - too many segments"); return false; } for (segment = 0; segment < pkg_hdr->segment_count; ++segment) { u32 offset = pkg_hdr->segment_offset[segment]; if (0xFU & offset) { netdev_err(netdev, "Invalid DDP profile %u segment alignment", segment); return false; } if (pkg_hdr_size > offset || offset >= size) { netdev_err(netdev, "Invalid DDP profile %u segment offset", segment); return false; } } return true; } /** * i40e_ddp_load - performs DDP loading * @netdev: net device structure * @data: buffer containing recipe file * @size: size of the buffer * @is_add: true when loading profile, false when rolling back the previous one * * Checks correctness and loads DDP profile to the NIC. The function is * also used for rolling back previously loaded profile. **/ int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size, bool is_add) { u8 profile_info_sec[sizeof(struct i40e_profile_section_header) + sizeof(struct i40e_profile_info)]; struct i40e_metadata_segment *metadata_hdr; struct i40e_profile_segment *profile_hdr; struct i40e_profile_info pinfo; struct i40e_package_header *pkg_hdr; struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u32 track_id; int istatus; int status; pkg_hdr = (struct i40e_package_header *)data; if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size)) return -EINVAL; if (size < (sizeof(struct i40e_package_header) + sizeof(u32) + sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) { netdev_err(netdev, "Invalid DDP recipe size."); return -EINVAL; } /* Find beginning of segment data in buffer */ metadata_hdr = (struct i40e_metadata_segment *) i40e_find_segment_in_package(SEGMENT_TYPE_METADATA, pkg_hdr); if (!metadata_hdr) { netdev_err(netdev, "Failed to find metadata segment in DDP recipe."); return -EINVAL; } track_id = metadata_hdr->track_id; profile_hdr = (struct i40e_profile_segment *) i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr); if (!profile_hdr) { netdev_err(netdev, "Failed to find profile segment in DDP recipe."); return -EINVAL; } pinfo.track_id = track_id; pinfo.version = profile_hdr->version; if (is_add) pinfo.op = I40E_DDP_ADD_TRACKID; else pinfo.op = I40E_DDP_REMOVE_TRACKID; memcpy(pinfo.name, profile_hdr->name, I40E_DDP_NAME_SIZE); /* Check if profile data already exists*/ istatus = i40e_ddp_does_profile_exist(&pf->hw, &pinfo); if (istatus < 0) { netdev_err(netdev, "Failed to fetch loaded profiles."); return istatus; } if (is_add) { if (istatus > 0) { netdev_err(netdev, "DDP profile already loaded."); return -EINVAL; } istatus = i40e_ddp_does_profile_overlap(&pf->hw, &pinfo); if (istatus < 0) { netdev_err(netdev, "Failed 
to fetch loaded profiles."); return istatus; } if (istatus > 0) { netdev_err(netdev, "DDP profile overlaps with existing one."); return -EINVAL; } } else { if (istatus == 0) { netdev_err(netdev, "DDP profile for deletion does not exist."); return -EINVAL; } } /* Load profile data */ if (is_add) { status = i40e_write_profile(&pf->hw, profile_hdr, track_id); if (status) { if (status == -ENODEV) { netdev_err(netdev, "Profile is not supported by the device."); return -EPERM; } netdev_err(netdev, "Failed to write DDP profile."); return -EIO; } } else { status = i40e_rollback_profile(&pf->hw, profile_hdr, track_id); if (status) { netdev_err(netdev, "Failed to remove DDP profile."); return -EIO; } } /* Add/remove profile to/from profile list in FW */ if (is_add) { status = i40e_add_pinfo(&pf->hw, profile_hdr, profile_info_sec, track_id); if (status) { netdev_err(netdev, "Failed to add DDP profile info."); return -EIO; } } else { status = i40e_del_pinfo(&pf->hw, profile_hdr, profile_info_sec, track_id); if (status) { netdev_err(netdev, "Failed to restore DDP profile info."); return -EIO; } } return 0; } /** * i40e_ddp_restore - restore previously loaded profile and remove from list * @pf: PF data struct * * Restores previously loaded profile stored on the list in driver memory. * After rolling back removes entry from the list. **/ static int i40e_ddp_restore(struct i40e_pf *pf) { struct i40e_ddp_old_profile_list *entry; struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; int status = 0; if (!list_empty(&pf->ddp_old_prof)) { entry = list_first_entry(&pf->ddp_old_prof, struct i40e_ddp_old_profile_list, list); status = i40e_ddp_load(netdev, entry->old_ddp_buf, entry->old_ddp_size, false); list_del(&entry->list); kfree(entry); } return status; } /** * i40e_ddp_flash - callback function for ethtool flash feature * @netdev: net device structure * @flash: kernel flash structure * * Ethtool callback function used for loading and unloading DDP profiles. **/ int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash) { const struct firmware *ddp_config; struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; int status = 0; /* Check for valid region first */ if (flash->region != I40_DDP_FLASH_REGION) { netdev_err(netdev, "Requested firmware region is not recognized by this driver."); return -EINVAL; } if (pf->hw.bus.func != 0) { netdev_err(netdev, "Any DDP operation is allowed only on Phy0 NIC interface"); return -EINVAL; } /* If the user supplied "-" instead of file name rollback previously * stored profile. */ if (strncmp(flash->data, "-", 2) != 0) { struct i40e_ddp_old_profile_list *list_entry; char profile_name[sizeof(I40E_DDP_PROFILE_PATH) + I40E_DDP_PROFILE_NAME_MAX]; profile_name[sizeof(profile_name) - 1] = 0; strncpy(profile_name, I40E_DDP_PROFILE_PATH, sizeof(profile_name) - 1); strncat(profile_name, flash->data, I40E_DDP_PROFILE_NAME_MAX); /* Load DDP recipe. 
*/ status = request_firmware(&ddp_config, profile_name, &netdev->dev); if (status) { netdev_err(netdev, "DDP recipe file request failed."); return status; } status = i40e_ddp_load(netdev, ddp_config->data, ddp_config->size, true); if (!status) { list_entry = kzalloc(sizeof(struct i40e_ddp_old_profile_list) + ddp_config->size, GFP_KERNEL); if (!list_entry) { netdev_info(netdev, "Failed to allocate memory for previous DDP profile data."); netdev_info(netdev, "New profile loaded but roll-back will be impossible."); } else { memcpy(list_entry->old_ddp_buf, ddp_config->data, ddp_config->size); list_entry->old_ddp_size = ddp_config->size; list_add(&list_entry->list, &pf->ddp_old_prof); } } release_firmware(ddp_config); } else { if (!list_empty(&pf->ddp_old_prof)) { status = i40e_ddp_restore(pf); } else { netdev_warn(netdev, "There is no DDP profile to restore."); status = -ENOENT; } } return status; }
linux-master
drivers/net/ethernet/intel/i40e/i40e_ddp.c
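A short illustrative aside, separate from the i40e_ddp.c sources above: the overlap check in i40e_ddp_profiles_overlap() keys entirely off the group ID carried in bits 23:16 of a profile's track_id (a group 0x00 profile conflicts with anything already loaded, group 0xFF coexists with anything, and every other profile coexists only with members of its own group). The standalone sketch below restates that rule so it can be compiled and exercised in isolation; the example track_id values are made up.

/* Standalone restatement of the DDP group-ID rule used by
 * i40e_ddp_profiles_overlap(). Returns true when the new profile
 * conflicts with an already loaded one.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static unsigned int ddp_group_id(uint32_t track_id)
{
	return (track_id >> 16) & 0xFF;
}

static bool ddp_profiles_overlap(uint32_t new_track_id, uint32_t old_track_id)
{
	unsigned int group_new = ddp_group_id(new_track_id);
	unsigned int group_old = ddp_group_id(old_track_id);

	if (group_new == 0x00)			/* group 0 conflicts with anything */
		return true;
	if (group_new == 0xFF || group_old == 0xFF)
		return false;			/* wildcard group coexists with anything */
	return group_old != group_new;		/* otherwise same group only */
}

int main(void)
{
	/* Only byte 2 (the group ID) of these example track_ids matters. */
	printf("%d\n", ddp_profiles_overlap(0x80010000u, 0x80010000u));	/* 0: same group */
	printf("%d\n", ddp_profiles_overlap(0x80020000u, 0x80010000u));	/* 1: different groups */
	printf("%d\n", ddp_profiles_overlap(0x80FF0000u, 0x80010000u));	/* 0: wildcard group */
	return 0;
}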
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020, Intel Corporation. */ /* flow director ethtool support for iavf */ #include "iavf.h" #define GTPU_PORT 2152 #define NAT_T_ESP_PORT 4500 #define PFCP_PORT 8805 static const struct in6_addr ipv6_addr_full_mask = { .in6_u = { .u6_addr8 = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, } } }; static const struct in6_addr ipv6_addr_zero_mask = { .in6_u = { .u6_addr8 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } } }; /** * iavf_validate_fdir_fltr_masks - validate Flow Director filter fields masks * @adapter: pointer to the VF adapter structure * @fltr: Flow Director filter data structure * * Returns 0 if all masks of packet fields are either full or empty. Returns * error on at least one partial mask. */ int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) { if (fltr->eth_mask.etype && fltr->eth_mask.etype != htons(U16_MAX)) goto partial_mask; if (fltr->ip_ver == 4) { if (fltr->ip_mask.v4_addrs.src_ip && fltr->ip_mask.v4_addrs.src_ip != htonl(U32_MAX)) goto partial_mask; if (fltr->ip_mask.v4_addrs.dst_ip && fltr->ip_mask.v4_addrs.dst_ip != htonl(U32_MAX)) goto partial_mask; if (fltr->ip_mask.tos && fltr->ip_mask.tos != U8_MAX) goto partial_mask; } else if (fltr->ip_ver == 6) { if (memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_zero_mask, sizeof(struct in6_addr)) && memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask, sizeof(struct in6_addr))) goto partial_mask; if (memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_zero_mask, sizeof(struct in6_addr)) && memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask, sizeof(struct in6_addr))) goto partial_mask; if (fltr->ip_mask.tclass && fltr->ip_mask.tclass != U8_MAX) goto partial_mask; } if (fltr->ip_mask.proto && fltr->ip_mask.proto != U8_MAX) goto partial_mask; if (fltr->ip_mask.src_port && fltr->ip_mask.src_port != htons(U16_MAX)) goto partial_mask; if (fltr->ip_mask.dst_port && fltr->ip_mask.dst_port != htons(U16_MAX)) goto partial_mask; if (fltr->ip_mask.spi && fltr->ip_mask.spi != htonl(U32_MAX)) goto partial_mask; if (fltr->ip_mask.l4_header && fltr->ip_mask.l4_header != htonl(U32_MAX)) goto partial_mask; return 0; partial_mask: dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, partial masks are not supported\n"); return -EOPNOTSUPP; } /** * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload * @fltr: Flow Director filter data structure */ static u16 iavf_pkt_udp_no_pay_len(struct iavf_fdir_fltr *fltr) { return sizeof(struct ethhdr) + (fltr->ip_ver == 4 ? 
sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + sizeof(struct udphdr); } /** * iavf_fill_fdir_gtpu_hdr - fill the GTP-U protocol header * @fltr: Flow Director filter data structure * @proto_hdrs: Flow Director protocol headers data structure * * Returns 0 if the GTP-U protocol header is set successfully */ static int iavf_fill_fdir_gtpu_hdr(struct iavf_fdir_fltr *fltr, struct virtchnl_proto_hdrs *proto_hdrs) { struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; struct virtchnl_proto_hdr *ghdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; struct virtchnl_proto_hdr *ehdr = NULL; /* Extension Header if it exists */ u16 adj_offs, hdr_offs; int i; VIRTCHNL_SET_PROTO_HDR_TYPE(ghdr, GTPU_IP); adj_offs = iavf_pkt_udp_no_pay_len(fltr); for (i = 0; i < fltr->flex_cnt; i++) { #define IAVF_GTPU_HDR_TEID_OFFS0 4 #define IAVF_GTPU_HDR_TEID_OFFS1 6 #define IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS 10 #define IAVF_GTPU_HDR_NEXT_EXTHDR_TYPE_MASK 0x00FF /* skip N_PDU */ /* PDU Session Container Extension Header (PSC) */ #define IAVF_GTPU_PSC_EXTHDR_TYPE 0x85 #define IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS 13 #define IAVF_GTPU_HDR_PSC_PDU_QFI_MASK 0x3F /* skip Type */ #define IAVF_GTPU_EH_QFI_IDX 1 if (fltr->flex_words[i].offset < adj_offs) return -EINVAL; hdr_offs = fltr->flex_words[i].offset - adj_offs; switch (hdr_offs) { case IAVF_GTPU_HDR_TEID_OFFS0: case IAVF_GTPU_HDR_TEID_OFFS1: { __be16 *pay_word = (__be16 *)ghdr->buffer; pay_word[hdr_offs >> 1] = htons(fltr->flex_words[i].word); VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ghdr, GTPU_IP, TEID); } break; case IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS: if ((fltr->flex_words[i].word & IAVF_GTPU_HDR_NEXT_EXTHDR_TYPE_MASK) != IAVF_GTPU_PSC_EXTHDR_TYPE) return -EOPNOTSUPP; if (!ehdr) ehdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; VIRTCHNL_SET_PROTO_HDR_TYPE(ehdr, GTPU_EH); break; case IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS: if (!ehdr) return -EINVAL; ehdr->buffer[IAVF_GTPU_EH_QFI_IDX] = fltr->flex_words[i].word & IAVF_GTPU_HDR_PSC_PDU_QFI_MASK; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ehdr, GTPU_EH, QFI); break; default: return -EINVAL; } } uhdr->field_selector = 0; /* The PF ignores the UDP header fields */ return 0; } /** * iavf_fill_fdir_pfcp_hdr - fill the PFCP protocol header * @fltr: Flow Director filter data structure * @proto_hdrs: Flow Director protocol headers data structure * * Returns 0 if the PFCP protocol header is set successfully */ static int iavf_fill_fdir_pfcp_hdr(struct iavf_fdir_fltr *fltr, struct virtchnl_proto_hdrs *proto_hdrs) { struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; u16 adj_offs, hdr_offs; int i; VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP); adj_offs = iavf_pkt_udp_no_pay_len(fltr); for (i = 0; i < fltr->flex_cnt; i++) { #define IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS 0 if (fltr->flex_words[i].offset < adj_offs) return -EINVAL; hdr_offs = fltr->flex_words[i].offset - adj_offs; switch (hdr_offs) { case IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS: hdr->buffer[0] = (fltr->flex_words[i].word >> 8) & 0xff; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD); break; default: return -EINVAL; } } uhdr->field_selector = 0; /* The PF ignores the UDP header fields */ return 0; } /** * iavf_fill_fdir_nat_t_esp_hdr - fill the NAT-T-ESP protocol header * @fltr: Flow Director filter data structure * @proto_hdrs: Flow Director protocol headers data structure * * Returns 0 if the NAT-T-ESP protocol header is set successfully 
*/ static int iavf_fill_fdir_nat_t_esp_hdr(struct iavf_fdir_fltr *fltr, struct virtchnl_proto_hdrs *proto_hdrs) { struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; u16 adj_offs, hdr_offs; u32 spi = 0; int i; VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP); adj_offs = iavf_pkt_udp_no_pay_len(fltr); for (i = 0; i < fltr->flex_cnt; i++) { #define IAVF_NAT_T_ESP_SPI_OFFS0 0 #define IAVF_NAT_T_ESP_SPI_OFFS1 2 if (fltr->flex_words[i].offset < adj_offs) return -EINVAL; hdr_offs = fltr->flex_words[i].offset - adj_offs; switch (hdr_offs) { case IAVF_NAT_T_ESP_SPI_OFFS0: spi |= fltr->flex_words[i].word << 16; break; case IAVF_NAT_T_ESP_SPI_OFFS1: spi |= fltr->flex_words[i].word; break; default: return -EINVAL; } } if (!spi) return -EOPNOTSUPP; /* Not support IKE Header Format with SPI 0 */ *(__be32 *)hdr->buffer = htonl(spi); VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI); uhdr->field_selector = 0; /* The PF ignores the UDP header fields */ return 0; } /** * iavf_fill_fdir_udp_flex_pay_hdr - fill the UDP payload header * @fltr: Flow Director filter data structure * @proto_hdrs: Flow Director protocol headers data structure * * Returns 0 if the UDP payload defined protocol header is set successfully */ static int iavf_fill_fdir_udp_flex_pay_hdr(struct iavf_fdir_fltr *fltr, struct virtchnl_proto_hdrs *proto_hdrs) { int err; switch (ntohs(fltr->ip_data.dst_port)) { case GTPU_PORT: err = iavf_fill_fdir_gtpu_hdr(fltr, proto_hdrs); break; case NAT_T_ESP_PORT: err = iavf_fill_fdir_nat_t_esp_hdr(fltr, proto_hdrs); break; case PFCP_PORT: err = iavf_fill_fdir_pfcp_hdr(fltr, proto_hdrs); break; default: err = -EOPNOTSUPP; break; } return err; } /** * iavf_fill_fdir_ip4_hdr - fill the IPv4 protocol header * @fltr: Flow Director filter data structure * @proto_hdrs: Flow Director protocol headers data structure * * Returns 0 if the IPv4 protocol header is set successfully */ static int iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr, struct virtchnl_proto_hdrs *proto_hdrs) { struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; struct iphdr *iph = (struct iphdr *)hdr->buffer; VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4); if (fltr->ip_mask.tos == U8_MAX) { iph->tos = fltr->ip_data.tos; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP); } if (fltr->ip_mask.proto == U8_MAX) { iph->protocol = fltr->ip_data.proto; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT); } if (fltr->ip_mask.v4_addrs.src_ip == htonl(U32_MAX)) { iph->saddr = fltr->ip_data.v4_addrs.src_ip; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC); } if (fltr->ip_mask.v4_addrs.dst_ip == htonl(U32_MAX)) { iph->daddr = fltr->ip_data.v4_addrs.dst_ip; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST); } return 0; } /** * iavf_fill_fdir_ip6_hdr - fill the IPv6 protocol header * @fltr: Flow Director filter data structure * @proto_hdrs: Flow Director protocol headers data structure * * Returns 0 if the IPv6 protocol header is set successfully */ static int iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr, struct virtchnl_proto_hdrs *proto_hdrs) { struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; struct ipv6hdr *iph = (struct ipv6hdr *)hdr->buffer; VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6); if (fltr->ip_mask.tclass == U8_MAX) { iph->priority = (fltr->ip_data.tclass >> 4) & 0xF; iph->flow_lbl[0] = (fltr->ip_data.tclass << 4) & 0xF0; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC); } if (fltr->ip_mask.proto == U8_MAX) { 
iph->nexthdr = fltr->ip_data.proto; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT); } if (!memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask, sizeof(struct in6_addr))) { memcpy(&iph->saddr, &fltr->ip_data.v6_addrs.src_ip, sizeof(struct in6_addr)); VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC); } if (!memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask, sizeof(struct in6_addr))) { memcpy(&iph->daddr, &fltr->ip_data.v6_addrs.dst_ip, sizeof(struct in6_addr)); VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST); } return 0; } /** * iavf_fill_fdir_tcp_hdr - fill the TCP protocol header * @fltr: Flow Director filter data structure * @proto_hdrs: Flow Director protocol headers data structure * * Returns 0 if the TCP protocol header is set successfully */ static int iavf_fill_fdir_tcp_hdr(struct iavf_fdir_fltr *fltr, struct virtchnl_proto_hdrs *proto_hdrs) { struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; struct tcphdr *tcph = (struct tcphdr *)hdr->buffer; VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP); if (fltr->ip_mask.src_port == htons(U16_MAX)) { tcph->source = fltr->ip_data.src_port; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT); } if (fltr->ip_mask.dst_port == htons(U16_MAX)) { tcph->dest = fltr->ip_data.dst_port; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT); } return 0; } /** * iavf_fill_fdir_udp_hdr - fill the UDP protocol header * @fltr: Flow Director filter data structure * @proto_hdrs: Flow Director protocol headers data structure * * Returns 0 if the UDP protocol header is set successfully */ static int iavf_fill_fdir_udp_hdr(struct iavf_fdir_fltr *fltr, struct virtchnl_proto_hdrs *proto_hdrs) { struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; struct udphdr *udph = (struct udphdr *)hdr->buffer; VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP); if (fltr->ip_mask.src_port == htons(U16_MAX)) { udph->source = fltr->ip_data.src_port; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT); } if (fltr->ip_mask.dst_port == htons(U16_MAX)) { udph->dest = fltr->ip_data.dst_port; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT); } if (!fltr->flex_cnt) return 0; return iavf_fill_fdir_udp_flex_pay_hdr(fltr, proto_hdrs); } /** * iavf_fill_fdir_sctp_hdr - fill the SCTP protocol header * @fltr: Flow Director filter data structure * @proto_hdrs: Flow Director protocol headers data structure * * Returns 0 if the SCTP protocol header is set successfully */ static int iavf_fill_fdir_sctp_hdr(struct iavf_fdir_fltr *fltr, struct virtchnl_proto_hdrs *proto_hdrs) { struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; struct sctphdr *sctph = (struct sctphdr *)hdr->buffer; VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP); if (fltr->ip_mask.src_port == htons(U16_MAX)) { sctph->source = fltr->ip_data.src_port; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT); } if (fltr->ip_mask.dst_port == htons(U16_MAX)) { sctph->dest = fltr->ip_data.dst_port; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT); } return 0; } /** * iavf_fill_fdir_ah_hdr - fill the AH protocol header * @fltr: Flow Director filter data structure * @proto_hdrs: Flow Director protocol headers data structure * * Returns 0 if the AH protocol header is set successfully */ static int iavf_fill_fdir_ah_hdr(struct iavf_fdir_fltr *fltr, struct virtchnl_proto_hdrs *proto_hdrs) { struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; struct ip_auth_hdr *ah = (struct ip_auth_hdr *)hdr->buffer; VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH); if 
(fltr->ip_mask.spi == htonl(U32_MAX)) { ah->spi = fltr->ip_data.spi; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI); } return 0; } /** * iavf_fill_fdir_esp_hdr - fill the ESP protocol header * @fltr: Flow Director filter data structure * @proto_hdrs: Flow Director protocol headers data structure * * Returns 0 if the ESP protocol header is set successfully */ static int iavf_fill_fdir_esp_hdr(struct iavf_fdir_fltr *fltr, struct virtchnl_proto_hdrs *proto_hdrs) { struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; struct ip_esp_hdr *esph = (struct ip_esp_hdr *)hdr->buffer; VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP); if (fltr->ip_mask.spi == htonl(U32_MAX)) { esph->spi = fltr->ip_data.spi; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI); } return 0; } /** * iavf_fill_fdir_l4_hdr - fill the L4 protocol header * @fltr: Flow Director filter data structure * @proto_hdrs: Flow Director protocol headers data structure * * Returns 0 if the L4 protocol header is set successfully */ static int iavf_fill_fdir_l4_hdr(struct iavf_fdir_fltr *fltr, struct virtchnl_proto_hdrs *proto_hdrs) { struct virtchnl_proto_hdr *hdr; __be32 *l4_4_data; if (!fltr->ip_mask.proto) /* IPv4/IPv6 header only */ return 0; hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; l4_4_data = (__be32 *)hdr->buffer; /* L2TPv3 over IP with 'Session ID' */ if (fltr->ip_data.proto == 115 && fltr->ip_mask.l4_header == htonl(U32_MAX)) { VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3); VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID); *l4_4_data = fltr->ip_data.l4_header; } else { return -EOPNOTSUPP; } return 0; } /** * iavf_fill_fdir_eth_hdr - fill the Ethernet protocol header * @fltr: Flow Director filter data structure * @proto_hdrs: Flow Director protocol headers data structure * * Returns 0 if the Ethernet protocol header is set successfully */ static int iavf_fill_fdir_eth_hdr(struct iavf_fdir_fltr *fltr, struct virtchnl_proto_hdrs *proto_hdrs) { struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; struct ethhdr *ehdr = (struct ethhdr *)hdr->buffer; VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH); if (fltr->eth_mask.etype == htons(U16_MAX)) { if (fltr->eth_data.etype == htons(ETH_P_IP) || fltr->eth_data.etype == htons(ETH_P_IPV6)) return -EOPNOTSUPP; ehdr->h_proto = fltr->eth_data.etype; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE); } return 0; } /** * iavf_fill_fdir_add_msg - fill the Flow Director filter into virtchnl message * @adapter: pointer to the VF adapter structure * @fltr: Flow Director filter data structure * * Returns 0 if the add Flow Director virtchnl message is filled successfully */ int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) { struct virtchnl_fdir_add *vc_msg = &fltr->vc_add_msg; struct virtchnl_proto_hdrs *proto_hdrs; int err; proto_hdrs = &vc_msg->rule_cfg.proto_hdrs; err = iavf_fill_fdir_eth_hdr(fltr, proto_hdrs); /* L2 always exists */ if (err) return err; switch (fltr->flow_type) { case IAVF_FDIR_FLOW_IPV4_TCP: err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs); break; case IAVF_FDIR_FLOW_IPV4_UDP: err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | iavf_fill_fdir_udp_hdr(fltr, proto_hdrs); break; case IAVF_FDIR_FLOW_IPV4_SCTP: err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs); break; case IAVF_FDIR_FLOW_IPV4_AH: err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | iavf_fill_fdir_ah_hdr(fltr, proto_hdrs); break; case IAVF_FDIR_FLOW_IPV4_ESP: err = 
iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | iavf_fill_fdir_esp_hdr(fltr, proto_hdrs); break; case IAVF_FDIR_FLOW_IPV4_OTHER: err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | iavf_fill_fdir_l4_hdr(fltr, proto_hdrs); break; case IAVF_FDIR_FLOW_IPV6_TCP: err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs); break; case IAVF_FDIR_FLOW_IPV6_UDP: err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | iavf_fill_fdir_udp_hdr(fltr, proto_hdrs); break; case IAVF_FDIR_FLOW_IPV6_SCTP: err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs); break; case IAVF_FDIR_FLOW_IPV6_AH: err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | iavf_fill_fdir_ah_hdr(fltr, proto_hdrs); break; case IAVF_FDIR_FLOW_IPV6_ESP: err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | iavf_fill_fdir_esp_hdr(fltr, proto_hdrs); break; case IAVF_FDIR_FLOW_IPV6_OTHER: err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | iavf_fill_fdir_l4_hdr(fltr, proto_hdrs); break; case IAVF_FDIR_FLOW_NON_IP_L2: break; default: err = -EINVAL; break; } if (err) return err; vc_msg->vsi_id = adapter->vsi.id; vc_msg->rule_cfg.action_set.count = 1; vc_msg->rule_cfg.action_set.actions[0].type = fltr->action; vc_msg->rule_cfg.action_set.actions[0].act_conf.queue.index = fltr->q_index; return 0; } /** * iavf_fdir_flow_proto_name - get the flow protocol name * @flow_type: Flow Director filter flow type **/ static const char *iavf_fdir_flow_proto_name(enum iavf_fdir_flow_type flow_type) { switch (flow_type) { case IAVF_FDIR_FLOW_IPV4_TCP: case IAVF_FDIR_FLOW_IPV6_TCP: return "TCP"; case IAVF_FDIR_FLOW_IPV4_UDP: case IAVF_FDIR_FLOW_IPV6_UDP: return "UDP"; case IAVF_FDIR_FLOW_IPV4_SCTP: case IAVF_FDIR_FLOW_IPV6_SCTP: return "SCTP"; case IAVF_FDIR_FLOW_IPV4_AH: case IAVF_FDIR_FLOW_IPV6_AH: return "AH"; case IAVF_FDIR_FLOW_IPV4_ESP: case IAVF_FDIR_FLOW_IPV6_ESP: return "ESP"; case IAVF_FDIR_FLOW_IPV4_OTHER: case IAVF_FDIR_FLOW_IPV6_OTHER: return "Other"; case IAVF_FDIR_FLOW_NON_IP_L2: return "Ethernet"; default: return NULL; } } /** * iavf_print_fdir_fltr * @adapter: adapter structure * @fltr: Flow Director filter to print * * Print the Flow Director filter **/ void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) { const char *proto = iavf_fdir_flow_proto_name(fltr->flow_type); if (!proto) return; switch (fltr->flow_type) { case IAVF_FDIR_FLOW_IPV4_TCP: case IAVF_FDIR_FLOW_IPV4_UDP: case IAVF_FDIR_FLOW_IPV4_SCTP: dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: dst_port %hu src_port %hu\n", fltr->loc, &fltr->ip_data.v4_addrs.dst_ip, &fltr->ip_data.v4_addrs.src_ip, proto, ntohs(fltr->ip_data.dst_port), ntohs(fltr->ip_data.src_port)); break; case IAVF_FDIR_FLOW_IPV4_AH: case IAVF_FDIR_FLOW_IPV4_ESP: dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: SPI %u\n", fltr->loc, &fltr->ip_data.v4_addrs.dst_ip, &fltr->ip_data.v4_addrs.src_ip, proto, ntohl(fltr->ip_data.spi)); break; case IAVF_FDIR_FLOW_IPV4_OTHER: dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 proto: %u L4_bytes: 0x%x\n", fltr->loc, &fltr->ip_data.v4_addrs.dst_ip, &fltr->ip_data.v4_addrs.src_ip, fltr->ip_data.proto, ntohl(fltr->ip_data.l4_header)); break; case IAVF_FDIR_FLOW_IPV6_TCP: case IAVF_FDIR_FLOW_IPV6_UDP: case IAVF_FDIR_FLOW_IPV6_SCTP: dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: dst_port %hu src_port %hu\n", fltr->loc, &fltr->ip_data.v6_addrs.dst_ip, &fltr->ip_data.v6_addrs.src_ip, proto, ntohs(fltr->ip_data.dst_port), 
ntohs(fltr->ip_data.src_port)); break; case IAVF_FDIR_FLOW_IPV6_AH: case IAVF_FDIR_FLOW_IPV6_ESP: dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: SPI %u\n", fltr->loc, &fltr->ip_data.v6_addrs.dst_ip, &fltr->ip_data.v6_addrs.src_ip, proto, ntohl(fltr->ip_data.spi)); break; case IAVF_FDIR_FLOW_IPV6_OTHER: dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 proto: %u L4_bytes: 0x%x\n", fltr->loc, &fltr->ip_data.v6_addrs.dst_ip, &fltr->ip_data.v6_addrs.src_ip, fltr->ip_data.proto, ntohl(fltr->ip_data.l4_header)); break; case IAVF_FDIR_FLOW_NON_IP_L2: dev_info(&adapter->pdev->dev, "Rule ID: %u eth_type: 0x%x\n", fltr->loc, ntohs(fltr->eth_data.etype)); break; default: break; } } /** * iavf_fdir_is_dup_fltr - test if filter is already in list * @adapter: pointer to the VF adapter structure * @fltr: Flow Director filter data structure * * Returns true if the filter is found in the list */ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) { struct iavf_fdir_fltr *tmp; bool ret = false; spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry(tmp, &adapter->fdir_list_head, list) { if (tmp->flow_type != fltr->flow_type) continue; if (!memcmp(&tmp->eth_data, &fltr->eth_data, sizeof(fltr->eth_data)) && !memcmp(&tmp->ip_data, &fltr->ip_data, sizeof(fltr->ip_data)) && !memcmp(&tmp->ext_data, &fltr->ext_data, sizeof(fltr->ext_data))) { ret = true; break; } } spin_unlock_bh(&adapter->fdir_fltr_lock); return ret; } /** * iavf_find_fdir_fltr_by_loc - find filter with location * @adapter: pointer to the VF adapter structure * @loc: location to find. * * Returns pointer to Flow Director filter if found or null */ struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc) { struct iavf_fdir_fltr *rule; list_for_each_entry(rule, &adapter->fdir_list_head, list) if (rule->loc == loc) return rule; return NULL; } /** * iavf_fdir_list_add_fltr - add a new node to the flow director filter list * @adapter: pointer to the VF adapter structure * @fltr: filter node to add to structure */ void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) { struct iavf_fdir_fltr *rule, *parent = NULL; list_for_each_entry(rule, &adapter->fdir_list_head, list) { if (rule->loc >= fltr->loc) break; parent = rule; } if (parent) list_add(&fltr->list, &parent->list); else list_add(&fltr->list, &adapter->fdir_list_head); }
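/*
 * Illustrative sketch (not part of the driver): the location-ordered insert
 * performed by iavf_fdir_list_add_fltr() above, redone with a plain singly
 * linked list so it compiles stand-alone in user space. The struct name,
 * insert_by_loc() and main() are hypothetical; only the ordering rule
 * (insert before the first rule whose loc is >= the new one) is taken from
 * the driver code.
 */
#include <stdio.h>
#include <stdlib.h>

struct fltr_node {
        unsigned int loc;               /* rule location, as in iavf_fdir_fltr */
        struct fltr_node *next;
};

static void insert_by_loc(struct fltr_node **head, struct fltr_node *fltr)
{
        struct fltr_node **pos = head;

        /* skip every rule whose location is strictly smaller */
        while (*pos && (*pos)->loc < fltr->loc)
                pos = &(*pos)->next;

        fltr->next = *pos;
        *pos = fltr;
}

int main(void)
{
        static const unsigned int locs[] = { 20, 5, 40, 10 };
        struct fltr_node *head = NULL, *n;
        size_t i;

        for (i = 0; i < sizeof(locs) / sizeof(locs[0]); i++) {
                n = calloc(1, sizeof(*n));
                if (!n)
                        return 1;
                n->loc = locs[i];
                insert_by_loc(&head, n);
        }

        for (n = head; n; n = n->next)  /* prints "5 10 20 40" */
                printf("%u ", n->loc);
        printf("\n");

        while (head) {                  /* tidy up the example list */
                n = head->next;
                free(head);
                head = n;
        }
        return 0;
}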
linux-master
drivers/net/ethernet/intel/iavf/iavf_fdir.c
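/*
 * Illustrative sketch (not part of the driver): how iavf_fill_fdir_nat_t_esp_hdr()
 * in the iavf_fdir.c code above reassembles a 32-bit ESP SPI from the two
 * 16-bit flex words at NAT-T payload offsets 0 and 2, then converts it to
 * network byte order before writing it into the virtchnl header buffer.
 * build_spi(), main() and the sample word values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

static uint32_t build_spi(uint16_t word_at_offs0, uint16_t word_at_offs2)
{
        uint32_t spi = 0;

        spi |= (uint32_t)word_at_offs0 << 16;   /* upper half, payload offset 0 */
        spi |= word_at_offs2;                   /* lower half, payload offset 2 */
        return spi;
}

int main(void)
{
        uint32_t spi = build_spi(0x1234, 0x5678);
        uint32_t wire = htonl(spi);             /* big-endian, as stored in hdr->buffer */

        /* an all-zero SPI would be rejected with -EOPNOTSUPP by the driver */
        printf("host SPI 0x%08x, wire representation 0x%08x\n",
               (unsigned int)spi, (unsigned int)wire);
        return 0;
}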
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2021, Intel Corporation. */ /* advanced RSS configuration ethtool support for iavf */ #include "iavf.h" /** * iavf_fill_adv_rss_ip4_hdr - fill the IPv4 RSS protocol header * @hdr: the virtchnl message protocol header data structure * @hash_flds: the RSS configuration protocol hash fields */ static void iavf_fill_adv_rss_ip4_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) { VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4); if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV4_SA) VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC); if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV4_DA) VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST); } /** * iavf_fill_adv_rss_ip6_hdr - fill the IPv6 RSS protocol header * @hdr: the virtchnl message protocol header data structure * @hash_flds: the RSS configuration protocol hash fields */ static void iavf_fill_adv_rss_ip6_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) { VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6); if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV6_SA) VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC); if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV6_DA) VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST); } /** * iavf_fill_adv_rss_tcp_hdr - fill the TCP RSS protocol header * @hdr: the virtchnl message protocol header data structure * @hash_flds: the RSS configuration protocol hash fields */ static void iavf_fill_adv_rss_tcp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) { VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP); if (hash_flds & IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT) VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT); if (hash_flds & IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT) VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT); } /** * iavf_fill_adv_rss_udp_hdr - fill the UDP RSS protocol header * @hdr: the virtchnl message protocol header data structure * @hash_flds: the RSS configuration protocol hash fields */ static void iavf_fill_adv_rss_udp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) { VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP); if (hash_flds & IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT) VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT); if (hash_flds & IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT) VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT); } /** * iavf_fill_adv_rss_sctp_hdr - fill the SCTP RSS protocol header * @hdr: the virtchnl message protocol header data structure * @hash_flds: the RSS configuration protocol hash fields */ static void iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) { VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP); if (hash_flds & IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT) VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT); if (hash_flds & IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT) VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT); } /** * iavf_fill_adv_rss_cfg_msg - fill the RSS configuration into virtchnl message * @rss_cfg: the virtchnl message to be filled with RSS configuration setting * @packet_hdrs: the RSS configuration protocol header types * @hash_flds: the RSS configuration protocol hash fields * * Returns 0 if the RSS configuration virtchnl message is filled successfully */ int iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg, u32 packet_hdrs, u64 hash_flds) { struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs; struct virtchnl_proto_hdr *hdr; rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC; proto_hdrs->tunnel_level = 0; /* always outer layer */ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L3) { case 
IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4: iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds); break; case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6: iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds); break; default: return -EINVAL; } hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L4) { case IAVF_ADV_RSS_FLOW_SEG_HDR_TCP: iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds); break; case IAVF_ADV_RSS_FLOW_SEG_HDR_UDP: iavf_fill_adv_rss_udp_hdr(hdr, hash_flds); break; case IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP: iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds); break; default: return -EINVAL; } return 0; } /** * iavf_find_adv_rss_cfg_by_hdrs - find RSS configuration with header type * @adapter: pointer to the VF adapter structure * @packet_hdrs: protocol header type to find. * * Returns pointer to advance RSS configuration if found or null */ struct iavf_adv_rss * iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs) { struct iavf_adv_rss *rss; list_for_each_entry(rss, &adapter->adv_rss_list_head, list) if (rss->packet_hdrs == packet_hdrs) return rss; return NULL; } /** * iavf_print_adv_rss_cfg * @adapter: pointer to the VF adapter structure * @rss: pointer to the advance RSS configuration to print * @action: the string description about how to handle the RSS * @result: the string description about the virtchnl result * * Print the advance RSS configuration **/ void iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss, const char *action, const char *result) { u32 packet_hdrs = rss->packet_hdrs; u64 hash_flds = rss->hash_flds; static char hash_opt[300]; const char *proto; if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_TCP) proto = "TCP"; else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_UDP) proto = "UDP"; else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP) proto = "SCTP"; else return; memset(hash_opt, 0, sizeof(hash_opt)); strcat(hash_opt, proto); if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4) strcat(hash_opt, "v4 "); else strcat(hash_opt, "v6 "); if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA | IAVF_ADV_RSS_HASH_FLD_IPV6_SA)) strcat(hash_opt, "IP SA,"); if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA | IAVF_ADV_RSS_HASH_FLD_IPV6_DA)) strcat(hash_opt, "IP DA,"); if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT | IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT | IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT)) strcat(hash_opt, "src port,"); if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT | IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT | IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT)) strcat(hash_opt, "dst port,"); if (!action) action = ""; if (!result) result = ""; dev_info(&adapter->pdev->dev, "%s %s %s\n", action, hash_opt, result); }
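/*
 * Illustrative sketch (not part of the driver): the two-stage header selection
 * done by iavf_fill_adv_rss_cfg_msg() above - exactly one L3 type (IPv4 or
 * IPv6) and exactly one L4 type (TCP, UDP or SCTP) must be present in
 * packet_hdrs, otherwise the request fails with -EINVAL. The HDR_* flag
 * values and describe_rss_cfg() are invented for the example; the real
 * IAVF_ADV_RSS_FLOW_SEG_HDR_* constants are defined in the driver headers.
 */
#include <stdio.h>
#include <errno.h>

#define HDR_IPV4    0x01u
#define HDR_IPV6    0x02u
#define HDR_TCP     0x10u
#define HDR_UDP     0x20u
#define HDR_SCTP    0x40u
#define HDR_L3_MASK (HDR_IPV4 | HDR_IPV6)
#define HDR_L4_MASK (HDR_TCP | HDR_UDP | HDR_SCTP)

static int describe_rss_cfg(unsigned int packet_hdrs)
{
        const char *l3, *l4;

        switch (packet_hdrs & HDR_L3_MASK) {
        case HDR_IPV4: l3 = "IPv4"; break;
        case HDR_IPV6: l3 = "IPv6"; break;
        default: return -EINVAL;        /* none, or both, L3 types requested */
        }

        switch (packet_hdrs & HDR_L4_MASK) {
        case HDR_TCP:  l4 = "TCP";  break;
        case HDR_UDP:  l4 = "UDP";  break;
        case HDR_SCTP: l4 = "SCTP"; break;
        default: return -EINVAL;        /* none, or more than one, L4 type requested */
        }

        printf("RSS rule hashes over %s + %s\n", l3, l4);
        return 0;
}

int main(void)
{
        describe_rss_cfg(HDR_IPV4 | HDR_TCP);   /* valid: "IPv4 + TCP" */

        /* two L4 types at once is rejected, mirroring the driver's -EINVAL */
        return describe_rss_cfg(HDR_IPV6 | HDR_TCP | HDR_UDP) == -EINVAL ? 0 : 1;
}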
linux-master
drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "iavf.h" #include "iavf_prototype.h" #include "iavf_client.h" /* All iavf tracepoints are defined by the include below, which must * be included exactly once across the whole kernel with * CREATE_TRACE_POINTS defined */ #define CREATE_TRACE_POINTS #include "iavf_trace.h" static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter); static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter); static int iavf_close(struct net_device *netdev); static void iavf_init_get_resources(struct iavf_adapter *adapter); static int iavf_check_reset_complete(struct iavf_hw *hw); char iavf_driver_name[] = "iavf"; static const char iavf_driver_string[] = "Intel(R) Ethernet Adaptive Virtual Function Network Driver"; static const char iavf_copyright[] = "Copyright (c) 2013 - 2018 Intel Corporation."; /* iavf_pci_tbl - PCI Device ID Table * * Wildcard entries (PCI_ANY_ID) should come last * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ static const struct pci_device_id iavf_pci_tbl[] = { {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0}, {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0}, {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0}, {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0}, /* required last entry */ {0, } }; MODULE_DEVICE_TABLE(pci, iavf_pci_tbl); MODULE_ALIAS("i40evf"); MODULE_AUTHOR("Intel Corporation, <[email protected]>"); MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); MODULE_LICENSE("GPL v2"); static const struct net_device_ops iavf_netdev_ops; int iavf_status_to_errno(enum iavf_status status) { switch (status) { case IAVF_SUCCESS: return 0; case IAVF_ERR_PARAM: case IAVF_ERR_MAC_TYPE: case IAVF_ERR_INVALID_MAC_ADDR: case IAVF_ERR_INVALID_LINK_SETTINGS: case IAVF_ERR_INVALID_PD_ID: case IAVF_ERR_INVALID_QP_ID: case IAVF_ERR_INVALID_CQ_ID: case IAVF_ERR_INVALID_CEQ_ID: case IAVF_ERR_INVALID_AEQ_ID: case IAVF_ERR_INVALID_SIZE: case IAVF_ERR_INVALID_ARP_INDEX: case IAVF_ERR_INVALID_FPM_FUNC_ID: case IAVF_ERR_QP_INVALID_MSG_SIZE: case IAVF_ERR_INVALID_FRAG_COUNT: case IAVF_ERR_INVALID_ALIGNMENT: case IAVF_ERR_INVALID_PUSH_PAGE_INDEX: case IAVF_ERR_INVALID_IMM_DATA_SIZE: case IAVF_ERR_INVALID_VF_ID: case IAVF_ERR_INVALID_HMCFN_ID: case IAVF_ERR_INVALID_PBLE_INDEX: case IAVF_ERR_INVALID_SD_INDEX: case IAVF_ERR_INVALID_PAGE_DESC_INDEX: case IAVF_ERR_INVALID_SD_TYPE: case IAVF_ERR_INVALID_HMC_OBJ_INDEX: case IAVF_ERR_INVALID_HMC_OBJ_COUNT: case IAVF_ERR_INVALID_SRQ_ARM_LIMIT: return -EINVAL; case IAVF_ERR_NVM: case IAVF_ERR_NVM_CHECKSUM: case IAVF_ERR_PHY: case IAVF_ERR_CONFIG: case IAVF_ERR_UNKNOWN_PHY: case IAVF_ERR_LINK_SETUP: case IAVF_ERR_ADAPTER_STOPPED: case IAVF_ERR_PRIMARY_REQUESTS_PENDING: case IAVF_ERR_AUTONEG_NOT_COMPLETE: case IAVF_ERR_RESET_FAILED: case IAVF_ERR_BAD_PTR: case IAVF_ERR_SWFW_SYNC: case IAVF_ERR_QP_TOOMANY_WRS_POSTED: case IAVF_ERR_QUEUE_EMPTY: case IAVF_ERR_FLUSHED_QUEUE: case IAVF_ERR_OPCODE_MISMATCH: case IAVF_ERR_CQP_COMPL_ERROR: case IAVF_ERR_BACKING_PAGE_ERROR: case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE: case IAVF_ERR_MEMCPY_FAILED: case IAVF_ERR_SRQ_ENABLED: case IAVF_ERR_ADMIN_QUEUE_ERROR: case IAVF_ERR_ADMIN_QUEUE_FULL: case IAVF_ERR_BAD_RDMA_CQE: case IAVF_ERR_NVM_BLANK_MODE: case IAVF_ERR_PE_DOORBELL_NOT_ENABLED: case IAVF_ERR_DIAG_TEST_FAILED: case IAVF_ERR_FIRMWARE_API_VERSION: case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR: return -EIO; case 
IAVF_ERR_DEVICE_NOT_SUPPORTED: return -ENODEV; case IAVF_ERR_NO_AVAILABLE_VSI: case IAVF_ERR_RING_FULL: return -ENOSPC; case IAVF_ERR_NO_MEMORY: return -ENOMEM; case IAVF_ERR_TIMEOUT: case IAVF_ERR_ADMIN_QUEUE_TIMEOUT: return -ETIMEDOUT; case IAVF_ERR_NOT_IMPLEMENTED: case IAVF_NOT_SUPPORTED: return -EOPNOTSUPP; case IAVF_ERR_ADMIN_QUEUE_NO_WORK: return -EALREADY; case IAVF_ERR_NOT_READY: return -EBUSY; case IAVF_ERR_BUF_TOO_SHORT: return -EMSGSIZE; } return -EIO; } int virtchnl_status_to_errno(enum virtchnl_status_code v_status) { switch (v_status) { case VIRTCHNL_STATUS_SUCCESS: return 0; case VIRTCHNL_STATUS_ERR_PARAM: case VIRTCHNL_STATUS_ERR_INVALID_VF_ID: return -EINVAL; case VIRTCHNL_STATUS_ERR_NO_MEMORY: return -ENOMEM; case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH: case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR: case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR: return -EIO; case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED: return -EOPNOTSUPP; } return -EIO; } /** * iavf_pdev_to_adapter - go from pci_dev to adapter * @pdev: pci_dev pointer */ static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev) { return netdev_priv(pci_get_drvdata(pdev)); } /** * iavf_is_reset_in_progress - Check if a reset is in progress * @adapter: board private structure */ static bool iavf_is_reset_in_progress(struct iavf_adapter *adapter) { if (adapter->state == __IAVF_RESETTING || adapter->flags & (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) return true; return false; } /** * iavf_wait_for_reset - Wait for reset to finish. * @adapter: board private structure * * Returns 0 if reset finished successfully, negative on timeout or interrupt. */ int iavf_wait_for_reset(struct iavf_adapter *adapter) { int ret = wait_event_interruptible_timeout(adapter->reset_waitqueue, !iavf_is_reset_in_progress(adapter), msecs_to_jiffies(5000)); /* If ret < 0 then it means wait was interrupted. * If ret == 0 then it means we got a timeout while waiting * for reset to finish. * If ret > 0 it means reset has finished. 
*/ if (ret > 0) return 0; else if (ret < 0) return -EINTR; else return -EBUSY; } /** * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested * @alignment: what to align the allocation to **/ enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem, u64 size, u32 alignment) { struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; if (!mem) return IAVF_ERR_PARAM; mem->size = ALIGN(size, alignment); mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, (dma_addr_t *)&mem->pa, GFP_KERNEL); if (mem->va) return 0; else return IAVF_ERR_NO_MEMORY; } /** * iavf_free_dma_mem - wrapper for DMA memory freeing * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem) { struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; if (!mem || !mem->va) return IAVF_ERR_PARAM; dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, (dma_addr_t)mem->pa); return 0; } /** * iavf_allocate_virt_mem - virt memory alloc wrapper * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested **/ enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem, u32 size) { if (!mem) return IAVF_ERR_PARAM; mem->size = size; mem->va = kzalloc(size, GFP_KERNEL); if (mem->va) return 0; else return IAVF_ERR_NO_MEMORY; } /** * iavf_free_virt_mem - virt memory free wrapper * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem) { kfree(mem->va); } /** * iavf_lock_timeout - try to lock mutex but give up after timeout * @lock: mutex that should be locked * @msecs: timeout in msecs * * Returns 0 on success, negative on failure **/ static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) { unsigned int wait, delay = 10; for (wait = 0; wait < msecs; wait += delay) { if (mutex_trylock(lock)) return 0; msleep(delay); } return -1; } /** * iavf_schedule_reset - Set the flags and schedule a reset event * @adapter: board private structure * @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED **/ void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags) { if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) && !(adapter->flags & (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) { adapter->flags |= flags; queue_work(adapter->wq, &adapter->reset_task); } } /** * iavf_schedule_aq_request - Set the flags and schedule aq request * @adapter: board private structure * @flags: requested aq flags **/ void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags) { adapter->aq_required |= flags; mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } /** * iavf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * @txqueue: queue number that is timing out **/ static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct iavf_adapter *adapter = netdev_priv(netdev); adapter->tx_timeout_count++; iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); } /** * iavf_misc_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure **/ static void iavf_misc_irq_disable(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; if (!adapter->msix_entries) return; wr32(hw, IAVF_VFINT_DYN_CTL01, 0); 
iavf_flush(hw); synchronize_irq(adapter->msix_entries[0].vector); } /** * iavf_misc_irq_enable - Enable default interrupt generation settings * @adapter: board private structure **/ static void iavf_misc_irq_enable(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK | IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK); iavf_flush(hw); } /** * iavf_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure **/ static void iavf_irq_disable(struct iavf_adapter *adapter) { int i; struct iavf_hw *hw = &adapter->hw; if (!adapter->msix_entries) return; for (i = 1; i < adapter->num_msix_vectors; i++) { wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0); synchronize_irq(adapter->msix_entries[i].vector); } iavf_flush(hw); } /** * iavf_irq_enable_queues - Enable interrupt for all queues * @adapter: board private structure **/ static void iavf_irq_enable_queues(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; int i; for (i = 1; i < adapter->num_msix_vectors; i++) { wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), IAVF_VFINT_DYN_CTLN1_INTENA_MASK | IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); } } /** * iavf_irq_enable - Enable default interrupt generation settings * @adapter: board private structure * @flush: boolean value whether to run rd32() **/ void iavf_irq_enable(struct iavf_adapter *adapter, bool flush) { struct iavf_hw *hw = &adapter->hw; iavf_misc_irq_enable(adapter); iavf_irq_enable_queues(adapter); if (flush) iavf_flush(hw); } /** * iavf_msix_aq - Interrupt handler for vector 0 * @irq: interrupt number * @data: pointer to netdev **/ static irqreturn_t iavf_msix_aq(int irq, void *data) { struct net_device *netdev = data; struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_hw *hw = &adapter->hw; /* handle non-queue interrupts, these reads clear the registers */ rd32(hw, IAVF_VFINT_ICR01); rd32(hw, IAVF_VFINT_ICR0_ENA1); if (adapter->state != __IAVF_REMOVE) /* schedule work on the private workqueue */ queue_work(adapter->wq, &adapter->adminq_task); return IRQ_HANDLED; } /** * iavf_msix_clean_rings - MSIX mode Interrupt Handler * @irq: interrupt number * @data: pointer to a q_vector **/ static irqreturn_t iavf_msix_clean_rings(int irq, void *data) { struct iavf_q_vector *q_vector = data; if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } /** * iavf_map_vector_to_rxq - associate irqs with rx queues * @adapter: board private structure * @v_idx: interrupt number * @r_idx: queue number **/ static void iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx) { struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx]; struct iavf_hw *hw = &adapter->hw; rx_ring->q_vector = q_vector; rx_ring->next = q_vector->rx.ring; rx_ring->vsi = &adapter->vsi; q_vector->rx.ring = rx_ring; q_vector->rx.count++; q_vector->rx.next_update = jiffies + 1; q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); q_vector->ring_mask |= BIT(r_idx); wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx), q_vector->rx.current_itr >> 1); q_vector->rx.current_itr = q_vector->rx.target_itr; } /** * iavf_map_vector_to_txq - associate irqs with tx queues * @adapter: board private structure * @v_idx: interrupt number * @t_idx: queue number **/ static void iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, 
int t_idx) { struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx]; struct iavf_hw *hw = &adapter->hw; tx_ring->q_vector = q_vector; tx_ring->next = q_vector->tx.ring; tx_ring->vsi = &adapter->vsi; q_vector->tx.ring = tx_ring; q_vector->tx.count++; q_vector->tx.next_update = jiffies + 1; q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); q_vector->num_ringpairs++; wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx), q_vector->tx.target_itr >> 1); q_vector->tx.current_itr = q_vector->tx.target_itr; } /** * iavf_map_rings_to_vectors - Maps descriptor rings to vectors * @adapter: board private structure to initialize * * This function maps descriptor rings to the queue-specific vectors * we were allotted through the MSI-X enabling code. Ideally, we'd have * one vector per ring/queue, but on a constrained vector budget, we * group the rings as "efficiently" as possible. You would add new * mapping configurations in here. **/ static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter) { int rings_remaining = adapter->num_active_queues; int ridx = 0, vidx = 0; int q_vectors; q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (; ridx < rings_remaining; ridx++) { iavf_map_vector_to_rxq(adapter, vidx, ridx); iavf_map_vector_to_txq(adapter, vidx, ridx); /* In the case where we have more queues than vectors, continue * round-robin on vectors until all queues are mapped. */ if (++vidx >= q_vectors) vidx = 0; } adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; } /** * iavf_irq_affinity_notify - Callback for affinity changes * @notify: context as to what irq was changed * @mask: the new affinity mask * * This is a callback function used by the irq_set_affinity_notifier function * so that we may register to receive changes to the irq affinity masks. **/ static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) { struct iavf_q_vector *q_vector = container_of(notify, struct iavf_q_vector, affinity_notify); cpumask_copy(&q_vector->affinity_mask, mask); } /** * iavf_irq_affinity_release - Callback for affinity notifier release * @ref: internal core kernel usage * * This is a callback function used by the irq_set_affinity_notifier function * to inform the current notification subscriber that they will no longer * receive notifications. **/ static void iavf_irq_affinity_release(struct kref *ref) {} /** * iavf_request_traffic_irqs - Initialize MSI-X interrupts * @adapter: board private structure * @basename: device basename * * Allocates MSI-X vectors for tx and rx handling, and requests * interrupts from the kernel. 
**/ static int iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) { unsigned int vector, q_vectors; unsigned int rx_int_idx = 0, tx_int_idx = 0; int irq_num, err; int cpu; iavf_irq_disable(adapter); /* Decrement for Other and TCP Timer vectors */ q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (vector = 0; vector < q_vectors; vector++) { struct iavf_q_vector *q_vector = &adapter->q_vectors[vector]; irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), "iavf-%s-TxRx-%u", basename, rx_int_idx++); tx_int_idx++; } else if (q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), "iavf-%s-rx-%u", basename, rx_int_idx++); } else if (q_vector->tx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), "iavf-%s-tx-%u", basename, tx_int_idx++); } else { /* skip this unused q_vector */ continue; } err = request_irq(irq_num, iavf_msix_clean_rings, 0, q_vector->name, q_vector); if (err) { dev_info(&adapter->pdev->dev, "Request_irq failed, error: %d\n", err); goto free_queue_irqs; } /* register for affinity change notifications */ q_vector->affinity_notify.notify = iavf_irq_affinity_notify; q_vector->affinity_notify.release = iavf_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); /* Spread the IRQ affinity hints across online CPUs. Note that * get_cpu_mask returns a mask with a permanent lifetime so * it's safe to use as a hint for irq_update_affinity_hint. */ cpu = cpumask_local_spread(q_vector->v_idx, -1); irq_update_affinity_hint(irq_num, get_cpu_mask(cpu)); } return 0; free_queue_irqs: while (vector) { vector--; irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; irq_set_affinity_notifier(irq_num, NULL); irq_update_affinity_hint(irq_num, NULL); free_irq(irq_num, &adapter->q_vectors[vector]); } return err; } /** * iavf_request_misc_irq - Initialize MSI-X interrupts * @adapter: board private structure * * Allocates MSI-X vector 0 and requests interrupts from the kernel. This * vector is only for the admin queue, and stays active even when the netdev * is closed. **/ static int iavf_request_misc_irq(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err; snprintf(adapter->misc_vector_name, sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx", dev_name(&adapter->pdev->dev)); err = request_irq(adapter->msix_entries[0].vector, &iavf_msix_aq, 0, adapter->misc_vector_name, netdev); if (err) { dev_err(&adapter->pdev->dev, "request_irq for %s failed: %d\n", adapter->misc_vector_name, err); free_irq(adapter->msix_entries[0].vector, netdev); } return err; } /** * iavf_free_traffic_irqs - Free MSI-X interrupts * @adapter: board private structure * * Frees all MSI-X vectors other than 0. **/ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) { int vector, irq_num, q_vectors; if (!adapter->msix_entries) return; q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (vector = 0; vector < q_vectors; vector++) { irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; irq_set_affinity_notifier(irq_num, NULL); irq_update_affinity_hint(irq_num, NULL); free_irq(irq_num, &adapter->q_vectors[vector]); } } /** * iavf_free_misc_irq - Free MSI-X miscellaneous vector * @adapter: board private structure * * Frees MSI-X vector 0. 
**/ static void iavf_free_misc_irq(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; if (!adapter->msix_entries) return; free_irq(adapter->msix_entries[0].vector, netdev); } /** * iavf_configure_tx - Configure Transmit Unit after Reset * @adapter: board private structure * * Configure the Tx unit of the MAC after a reset. **/ static void iavf_configure_tx(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; int i; for (i = 0; i < adapter->num_active_queues; i++) adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i); } /** * iavf_configure_rx - Configure Receive Unit after Reset * @adapter: board private structure * * Configure the Rx unit of the MAC after a reset. **/ static void iavf_configure_rx(struct iavf_adapter *adapter) { unsigned int rx_buf_len = IAVF_RXBUFFER_2048; struct iavf_hw *hw = &adapter->hw; int i; /* Legacy Rx will always default to a 2048 buffer size. */ #if (PAGE_SIZE < 8192) if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) { struct net_device *netdev = adapter->netdev; /* For jumbo frames on systems with 4K pages we have to use * an order 1 page, so we might as well increase the size * of our Rx buffer to make better use of the available space */ rx_buf_len = IAVF_RXBUFFER_3072; /* We use a 1536 buffer size for configurations with * standard Ethernet mtu. On x86 this gives us enough room * for shared info and 192 bytes of padding. */ if (!IAVF_2K_TOO_SMALL_WITH_PADDING && (netdev->mtu <= ETH_DATA_LEN)) rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; } #endif for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i); adapter->rx_rings[i].rx_buf_len = rx_buf_len; if (adapter->flags & IAVF_FLAG_LEGACY_RX) clear_ring_build_skb_enabled(&adapter->rx_rings[i]); else set_ring_build_skb_enabled(&adapter->rx_rings[i]); } } /** * iavf_find_vlan - Search filter list for specific vlan filter * @adapter: board private structure * @vlan: vlan tag * * Returns ptr to the filter object or NULL. Must be called while holding the * mac_vlan_list_lock. **/ static struct iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan) { struct iavf_vlan_filter *f; list_for_each_entry(f, &adapter->vlan_filter_list, list) { if (f->vlan.vid == vlan.vid && f->vlan.tpid == vlan.tpid) return f; } return NULL; } /** * iavf_add_vlan - Add a vlan filter to the list * @adapter: board private structure * @vlan: VLAN tag * * Returns ptr to the filter object or NULL when no memory available. 
**/ static struct iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan) { struct iavf_vlan_filter *f = NULL; spin_lock_bh(&adapter->mac_vlan_list_lock); f = iavf_find_vlan(adapter, vlan); if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) goto clearout; f->vlan = vlan; list_add_tail(&f->list, &adapter->vlan_filter_list); f->state = IAVF_VLAN_ADD; adapter->num_vlan_filters++; iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER); } clearout: spin_unlock_bh(&adapter->mac_vlan_list_lock); return f; } /** * iavf_del_vlan - Remove a vlan filter from the list * @adapter: board private structure * @vlan: VLAN tag **/ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan) { struct iavf_vlan_filter *f; spin_lock_bh(&adapter->mac_vlan_list_lock); f = iavf_find_vlan(adapter, vlan); if (f) { f->state = IAVF_VLAN_REMOVE; iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER); } spin_unlock_bh(&adapter->mac_vlan_list_lock); } /** * iavf_restore_filters * @adapter: board private structure * * Restore existing non MAC filters when VF netdev comes back up **/ static void iavf_restore_filters(struct iavf_adapter *adapter) { struct iavf_vlan_filter *f; /* re-add all VLAN filters */ spin_lock_bh(&adapter->mac_vlan_list_lock); list_for_each_entry(f, &adapter->vlan_filter_list, list) { if (f->state == IAVF_VLAN_INACTIVE) f->state = IAVF_VLAN_ADD; } spin_unlock_bh(&adapter->mac_vlan_list_lock); adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; } /** * iavf_get_num_vlans_added - get number of VLANs added * @adapter: board private structure */ u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter) { return adapter->num_vlan_filters; } /** * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF * @adapter: board private structure * * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN, * do not impose a limit as that maintains current behavior and for * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF. **/ static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter) { /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has * never been a limit on the VF driver side */ if (VLAN_ALLOWED(adapter)) return VLAN_N_VID; else if (VLAN_V2_ALLOWED(adapter)) return adapter->vlan_v2_caps.filtering.max_filters; return 0; } /** * iavf_max_vlans_added - check if maximum VLANs allowed already exist * @adapter: board private structure **/ static bool iavf_max_vlans_added(struct iavf_adapter *adapter) { if (iavf_get_num_vlans_added(adapter) < iavf_get_max_vlans_allowed(adapter)) return false; return true; } /** * iavf_vlan_rx_add_vid - Add a VLAN filter to a device * @netdev: network device struct * @proto: unused protocol data * @vid: VLAN tag **/ static int iavf_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) { struct iavf_adapter *adapter = netdev_priv(netdev); /* Do not track VLAN 0 filter, always added by the PF on VF init */ if (!vid) return 0; if (!VLAN_FILTERING_ALLOWED(adapter)) return -EIO; if (iavf_max_vlans_added(adapter)) { netdev_err(netdev, "Max allowed VLAN filters %u. 
Remove existing VLANs or disable filtering via Ethtool if supported.\n", iavf_get_max_vlans_allowed(adapter)); return -EIO; } if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)))) return -ENOMEM; return 0; } /** * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device * @netdev: network device struct * @proto: unused protocol data * @vid: VLAN tag **/ static int iavf_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) { struct iavf_adapter *adapter = netdev_priv(netdev); /* We do not track VLAN 0 filter */ if (!vid) return 0; iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))); return 0; } /** * iavf_find_filter - Search filter list for specific mac filter * @adapter: board private structure * @macaddr: the MAC address * * Returns ptr to the filter object or NULL. Must be called while holding the * mac_vlan_list_lock. **/ static struct iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, const u8 *macaddr) { struct iavf_mac_filter *f; if (!macaddr) return NULL; list_for_each_entry(f, &adapter->mac_filter_list, list) { if (ether_addr_equal(macaddr, f->macaddr)) return f; } return NULL; } /** * iavf_add_filter - Add a mac filter to the filter list * @adapter: board private structure * @macaddr: the MAC address * * Returns ptr to the filter object or NULL when no memory available. **/ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, const u8 *macaddr) { struct iavf_mac_filter *f; if (!macaddr) return NULL; f = iavf_find_filter(adapter, macaddr); if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) return f; ether_addr_copy(f->macaddr, macaddr); list_add_tail(&f->list, &adapter->mac_filter_list); f->add = true; f->add_handled = false; f->is_new_mac = true; f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr); adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; } else { f->remove = false; } return f; } /** * iavf_replace_primary_mac - Replace current primary address * @adapter: board private structure * @new_mac: new MAC address to be applied * * Replace current dev_addr and send request to PF for removal of previous * primary MAC address filter and addition of new primary MAC filter. * Return 0 for success, -ENOMEM for failure. * * Do not call this with mac_vlan_list_lock! 
**/ static int iavf_replace_primary_mac(struct iavf_adapter *adapter, const u8 *new_mac) { struct iavf_hw *hw = &adapter->hw; struct iavf_mac_filter *new_f; struct iavf_mac_filter *old_f; spin_lock_bh(&adapter->mac_vlan_list_lock); new_f = iavf_add_filter(adapter, new_mac); if (!new_f) { spin_unlock_bh(&adapter->mac_vlan_list_lock); return -ENOMEM; } old_f = iavf_find_filter(adapter, hw->mac.addr); if (old_f) { old_f->is_primary = false; old_f->remove = true; adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; } /* Always send the request to add if changing primary MAC, * even if filter is already present on the list */ new_f->is_primary = true; new_f->add = true; adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; ether_addr_copy(hw->mac.addr, new_mac); spin_unlock_bh(&adapter->mac_vlan_list_lock); /* schedule the watchdog task to immediately process the request */ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); return 0; } /** * iavf_is_mac_set_handled - wait for a response to set MAC from PF * @netdev: network interface device structure * @macaddr: MAC address to set * * Returns true on success, false on failure */ static bool iavf_is_mac_set_handled(struct net_device *netdev, const u8 *macaddr) { struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_mac_filter *f; bool ret = false; spin_lock_bh(&adapter->mac_vlan_list_lock); f = iavf_find_filter(adapter, macaddr); if (!f || (!f->add && f->add_handled)) ret = true; spin_unlock_bh(&adapter->mac_vlan_list_lock); return ret; } /** * iavf_set_mac - NDO callback to set port MAC address * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure */ static int iavf_set_mac(struct net_device *netdev, void *p) { struct iavf_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; int ret; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; ret = iavf_replace_primary_mac(adapter, addr->sa_data); if (ret) return ret; ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, iavf_is_mac_set_handled(netdev, addr->sa_data), msecs_to_jiffies(2500)); /* If ret < 0 then it means wait was interrupted. * If ret == 0 then it means we got a timeout. * else it means we got response for set MAC from PF, * check if netdev MAC was updated to requested MAC, * if yes then set MAC succeeded otherwise it failed return -EACCES */ if (ret < 0) return ret; if (!ret) return -EAGAIN; if (!ether_addr_equal(netdev->dev_addr, addr->sa_data)) return -EACCES; return 0; } /** * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address * @netdev: the netdevice * @addr: address to add * * Called by __dev_(mc|uc)_sync when an address needs to be added. We call * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. */ static int iavf_addr_sync(struct net_device *netdev, const u8 *addr) { struct iavf_adapter *adapter = netdev_priv(netdev); if (iavf_add_filter(adapter, addr)) return 0; else return -ENOMEM; } /** * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address * @netdev: the netdevice * @addr: address to add * * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. */ static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr) { struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_mac_filter *f; /* Under some circumstances, we might receive a request to delete * our own device address from our uc list. 
Because we store the * device address in the VSI's MAC/VLAN filter list, we need to ignore * such requests and not delete our device address from this list. */ if (ether_addr_equal(addr, netdev->dev_addr)) return 0; f = iavf_find_filter(adapter, addr); if (f) { f->remove = true; adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; } return 0; } /** * iavf_set_rx_mode - NDO callback to set the netdev filters * @netdev: network interface device structure **/ static void iavf_set_rx_mode(struct net_device *netdev) { struct iavf_adapter *adapter = netdev_priv(netdev); spin_lock_bh(&adapter->mac_vlan_list_lock); __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); spin_unlock_bh(&adapter->mac_vlan_list_lock); if (netdev->flags & IFF_PROMISC && !(adapter->flags & IAVF_FLAG_PROMISC_ON)) adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC; else if (!(netdev->flags & IFF_PROMISC) && adapter->flags & IAVF_FLAG_PROMISC_ON) adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC; if (netdev->flags & IFF_ALLMULTI && !(adapter->flags & IAVF_FLAG_ALLMULTI_ON)) adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI; else if (!(netdev->flags & IFF_ALLMULTI) && adapter->flags & IAVF_FLAG_ALLMULTI_ON) adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI; } /** * iavf_napi_enable_all - enable NAPI on all queue vectors * @adapter: board private structure **/ static void iavf_napi_enable_all(struct iavf_adapter *adapter) { int q_idx; struct iavf_q_vector *q_vector; int q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (q_idx = 0; q_idx < q_vectors; q_idx++) { struct napi_struct *napi; q_vector = &adapter->q_vectors[q_idx]; napi = &q_vector->napi; napi_enable(napi); } } /** * iavf_napi_disable_all - disable NAPI on all queue vectors * @adapter: board private structure **/ static void iavf_napi_disable_all(struct iavf_adapter *adapter) { int q_idx; struct iavf_q_vector *q_vector; int q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = &adapter->q_vectors[q_idx]; napi_disable(&q_vector->napi); } } /** * iavf_configure - set up transmit and receive data structures * @adapter: board private structure **/ static void iavf_configure(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; int i; iavf_set_rx_mode(netdev); iavf_configure_tx(adapter); iavf_configure_rx(adapter); adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES; for (i = 0; i < adapter->num_active_queues; i++) { struct iavf_ring *ring = &adapter->rx_rings[i]; iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring)); } } /** * iavf_up_complete - Finish the last steps of bringing up a connection * @adapter: board private structure * * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. **/ static void iavf_up_complete(struct iavf_adapter *adapter) { iavf_change_state(adapter, __IAVF_RUNNING); clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); iavf_napi_enable_all(adapter); adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; if (CLIENT_ENABLED(adapter)) adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } /** * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF * yet and mark other to be removed. 
* @adapter: board private structure **/ static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter) { struct iavf_vlan_filter *vlf, *vlftmp; struct iavf_mac_filter *f, *ftmp; spin_lock_bh(&adapter->mac_vlan_list_lock); /* clear the sync flag on all filters */ __dev_uc_unsync(adapter->netdev, NULL); __dev_mc_unsync(adapter->netdev, NULL); /* remove all MAC filters */ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { if (f->add) { list_del(&f->list); kfree(f); } else { f->remove = true; } } /* disable all VLAN filters */ list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, list) vlf->state = IAVF_VLAN_DISABLE; spin_unlock_bh(&adapter->mac_vlan_list_lock); } /** * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and * mark other to be removed. * @adapter: board private structure **/ static void iavf_clear_cloud_filters(struct iavf_adapter *adapter) { struct iavf_cloud_filter *cf, *cftmp; /* remove all cloud filters */ spin_lock_bh(&adapter->cloud_filter_list_lock); list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { if (cf->add) { list_del(&cf->list); kfree(cf); adapter->num_cloud_filters--; } else { cf->del = true; } } spin_unlock_bh(&adapter->cloud_filter_list_lock); } /** * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark * other to be removed. * @adapter: board private structure **/ static void iavf_clear_fdir_filters(struct iavf_adapter *adapter) { struct iavf_fdir_fltr *fdir, *fdirtmp; /* remove all Flow Director filters */ spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) { if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) { list_del(&fdir->list); kfree(fdir); adapter->fdir_active_fltr--; } else { fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; } } spin_unlock_bh(&adapter->fdir_fltr_lock); } /** * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark * other to be removed. * @adapter: board private structure **/ static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter) { struct iavf_adv_rss *rss, *rsstmp; /* remove all advance RSS configuration */ spin_lock_bh(&adapter->adv_rss_lock); list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head, list) { if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) { list_del(&rss->list); kfree(rss); } else { rss->state = IAVF_ADV_RSS_DEL_REQUEST; } } spin_unlock_bh(&adapter->adv_rss_lock); } /** * iavf_down - Shutdown the connection processing * @adapter: board private structure * * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. **/ void iavf_down(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; if (adapter->state <= __IAVF_DOWN_PENDING) return; netif_carrier_off(netdev); netif_tx_disable(netdev); adapter->link_up = false; iavf_napi_disable_all(adapter); iavf_irq_disable(adapter); iavf_clear_mac_vlan_filters(adapter); iavf_clear_cloud_filters(adapter); iavf_clear_fdir_filters(adapter); iavf_clear_adv_rss_conf(adapter); if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && !(test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))) { /* cancel any current operation */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; /* Schedule operations to close down the HW. Don't wait * here for this to complete. The watchdog is still running * and it will take care of this. 
*/ if (!list_empty(&adapter->mac_filter_list)) adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; if (!list_empty(&adapter->vlan_filter_list)) adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; if (!list_empty(&adapter->cloud_filter_list)) adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; if (!list_empty(&adapter->fdir_list_head)) adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; if (!list_empty(&adapter->adv_rss_list_head)) adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; } mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } /** * iavf_acquire_msix_vectors - Setup the MSIX capability * @adapter: board private structure * @vectors: number of vectors to request * * Work with the OS to set up the MSIX vectors needed. * * Returns 0 on success, negative on failure **/ static int iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors) { int err, vector_threshold; /* We'll want at least 3 (vector_threshold): * 0) Other (Admin Queue and link, mostly) * 1) TxQ[0] Cleanup * 2) RxQ[0] Cleanup */ vector_threshold = MIN_MSIX_COUNT; /* The more we get, the more we will assign to Tx/Rx Cleanup * for the separate queues...where Rx Cleanup >= Tx Cleanup. * Right now, we simply care about how many we'll get; we'll * set them up later while requesting irq's. */ err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, vector_threshold, vectors); if (err < 0) { dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n"); kfree(adapter->msix_entries); adapter->msix_entries = NULL; return err; } /* Adjust for only the vectors we'll use, which is minimum * of max_msix_q_vectors + NONQ_VECS, or the number of * vectors we were allocated. */ adapter->num_msix_vectors = err; return 0; } /** * iavf_free_queues - Free memory for all rings * @adapter: board private structure to initialize * * Free all of the memory associated with queue pairs. **/ static void iavf_free_queues(struct iavf_adapter *adapter) { if (!adapter->vsi_res) return; adapter->num_active_queues = 0; kfree(adapter->tx_rings); adapter->tx_rings = NULL; kfree(adapter->rx_rings); adapter->rx_rings = NULL; } /** * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload * @adapter: board private structure * * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or * stripped in certain descriptor fields. Instead of checking the offload * capability bits in the hot path, cache the location the ring specific * flags. 
*/ void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter) { int i; for (i = 0; i < adapter->num_active_queues; i++) { struct iavf_ring *tx_ring = &adapter->tx_rings[i]; struct iavf_ring *rx_ring = &adapter->rx_rings[i]; /* prevent multiple L2TAG bits being set after VFR */ tx_ring->flags &= ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 | IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2); rx_ring->flags &= ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 | IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2); if (VLAN_ALLOWED(adapter)) { tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; } else if (VLAN_V2_ALLOWED(adapter)) { struct virtchnl_vlan_supported_caps *stripping_support; struct virtchnl_vlan_supported_caps *insertion_support; stripping_support = &adapter->vlan_v2_caps.offloads.stripping_support; insertion_support = &adapter->vlan_v2_caps.offloads.insertion_support; if (stripping_support->outer) { if (stripping_support->outer & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; else if (stripping_support->outer & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2) rx_ring->flags |= IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2; } else if (stripping_support->inner) { if (stripping_support->inner & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; else if (stripping_support->inner & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2) rx_ring->flags |= IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2; } if (insertion_support->outer) { if (insertion_support->outer & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; else if (insertion_support->outer & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2) tx_ring->flags |= IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2; } else if (insertion_support->inner) { if (insertion_support->inner & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; else if (insertion_support->inner & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2) tx_ring->flags |= IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2; } } } } /** * iavf_alloc_queues - Allocate memory for all rings * @adapter: board private structure to initialize * * We allocate one ring per queue at run-time since we don't know the * number of queues at compile-time. The polling_netdev array is * intended for Multiqueue, but should work fine with a single queue. **/ static int iavf_alloc_queues(struct iavf_adapter *adapter) { int i, num_active_queues; /* If we're in reset reallocating queues we don't actually know yet for * certain the PF gave us the number of queues we asked for but we'll * assume it did. Once basic reset is finished we'll confirm once we * start negotiating config with PF. 
*/ if (adapter->num_req_queues) num_active_queues = adapter->num_req_queues; else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && adapter->num_tc) num_active_queues = adapter->ch_config.total_qps; else num_active_queues = min_t(int, adapter->vsi_res->num_queue_pairs, (int)(num_online_cpus())); adapter->tx_rings = kcalloc(num_active_queues, sizeof(struct iavf_ring), GFP_KERNEL); if (!adapter->tx_rings) goto err_out; adapter->rx_rings = kcalloc(num_active_queues, sizeof(struct iavf_ring), GFP_KERNEL); if (!adapter->rx_rings) goto err_out; for (i = 0; i < num_active_queues; i++) { struct iavf_ring *tx_ring; struct iavf_ring *rx_ring; tx_ring = &adapter->tx_rings[i]; tx_ring->queue_index = i; tx_ring->netdev = adapter->netdev; tx_ring->dev = &adapter->pdev->dev; tx_ring->count = adapter->tx_desc_count; tx_ring->itr_setting = IAVF_ITR_TX_DEF; if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE) tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR; rx_ring = &adapter->rx_rings[i]; rx_ring->queue_index = i; rx_ring->netdev = adapter->netdev; rx_ring->dev = &adapter->pdev->dev; rx_ring->count = adapter->rx_desc_count; rx_ring->itr_setting = IAVF_ITR_RX_DEF; } adapter->num_active_queues = num_active_queues; iavf_set_queue_vlan_tag_loc(adapter); return 0; err_out: iavf_free_queues(adapter); return -ENOMEM; } /** * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported * @adapter: board private structure to initialize * * Attempt to configure the interrupts using the best available * capabilities of the hardware and the kernel. **/ static int iavf_set_interrupt_capability(struct iavf_adapter *adapter) { int vector, v_budget; int pairs = 0; int err = 0; if (!adapter->vsi_res) { err = -EIO; goto out; } pairs = adapter->num_active_queues; /* It's easy to be greedy for MSI-X vectors, but it really doesn't do * us much good if we have more vectors than CPUs. However, we already * limit the total number of queues by the number of CPUs so we do not * need any further limiting here. 
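	 *
	 * Worked example (assuming NONQ_VECS == 1, i.e. one extra vector for
	 * the admin queue / "other" interrupt): with 8 queue pairs and a PF
	 * grant of max_vectors == 5, v_budget below becomes
	 * min(8 + 1, 5) == 5, so some Tx/Rx cleanup work ends up sharing
	 * vectors.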
*/ v_budget = min_t(int, pairs + NONQ_VECS, (int)adapter->vf_res->max_vectors); adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); if (!adapter->msix_entries) { err = -ENOMEM; goto out; } for (vector = 0; vector < v_budget; vector++) adapter->msix_entries[vector].entry = vector; err = iavf_acquire_msix_vectors(adapter, v_budget); if (!err) iavf_schedule_finish_config(adapter); out: return err; } /** * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands * @adapter: board private structure * * Return 0 on success, negative on failure **/ static int iavf_config_rss_aq(struct iavf_adapter *adapter) { struct iavf_aqc_get_set_rss_key_data *rss_key = (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key; struct iavf_hw *hw = &adapter->hw; enum iavf_status status; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", adapter->current_op); return -EBUSY; } status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); if (status) { dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", iavf_stat_str(hw, status), iavf_aq_str(hw, hw->aq.asq_last_status)); return iavf_status_to_errno(status); } status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, adapter->rss_lut, adapter->rss_lut_size); if (status) { dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", iavf_stat_str(hw, status), iavf_aq_str(hw, hw->aq.asq_last_status)); return iavf_status_to_errno(status); } return 0; } /** * iavf_config_rss_reg - Configure RSS keys and lut by writing registers * @adapter: board private structure * * Returns 0 on success, negative on failure **/ static int iavf_config_rss_reg(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; u32 *dw; u16 i; dw = (u32 *)adapter->rss_key; for (i = 0; i <= adapter->rss_key_size / 4; i++) wr32(hw, IAVF_VFQF_HKEY(i), dw[i]); dw = (u32 *)adapter->rss_lut; for (i = 0; i <= adapter->rss_lut_size / 4; i++) wr32(hw, IAVF_VFQF_HLUT(i), dw[i]); iavf_flush(hw); return 0; } /** * iavf_config_rss - Configure RSS keys and lut * @adapter: board private structure * * Returns 0 on success, negative on failure **/ int iavf_config_rss(struct iavf_adapter *adapter) { if (RSS_PF(adapter)) { adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT | IAVF_FLAG_AQ_SET_RSS_KEY; return 0; } else if (RSS_AQ(adapter)) { return iavf_config_rss_aq(adapter); } else { return iavf_config_rss_reg(adapter); } } /** * iavf_fill_rss_lut - Fill the lut with default values * @adapter: board private structure **/ static void iavf_fill_rss_lut(struct iavf_adapter *adapter) { u16 i; for (i = 0; i < adapter->rss_lut_size; i++) adapter->rss_lut[i] = i % adapter->num_active_queues; } /** * iavf_init_rss - Prepare for RSS * @adapter: board private structure * * Return 0 on success, negative on failure **/ static int iavf_init_rss(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; if (!RSS_PF(adapter)) { /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED; else adapter->hena = IAVF_DEFAULT_RSS_HENA; wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena); wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32)); } iavf_fill_rss_lut(adapter); netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); return iavf_config_rss(adapter); } /** * iavf_alloc_q_vectors - Allocate memory for 
interrupt vectors * @adapter: board private structure to initialize * * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM. **/ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) { int q_idx = 0, num_q_vectors; struct iavf_q_vector *q_vector; num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), GFP_KERNEL); if (!adapter->q_vectors) return -ENOMEM; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { q_vector = &adapter->q_vectors[q_idx]; q_vector->adapter = adapter; q_vector->vsi = &adapter->vsi; q_vector->v_idx = q_idx; q_vector->reg_idx = q_idx; cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); netif_napi_add(adapter->netdev, &q_vector->napi, iavf_napi_poll); } return 0; } /** * iavf_free_q_vectors - Free memory allocated for interrupt vectors * @adapter: board private structure to initialize * * This function frees the memory allocated to the q_vectors. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector. **/ static void iavf_free_q_vectors(struct iavf_adapter *adapter) { int q_idx, num_q_vectors; if (!adapter->q_vectors) return; num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; netif_napi_del(&q_vector->napi); } kfree(adapter->q_vectors); adapter->q_vectors = NULL; } /** * iavf_reset_interrupt_capability - Reset MSIX setup * @adapter: board private structure * **/ static void iavf_reset_interrupt_capability(struct iavf_adapter *adapter) { if (!adapter->msix_entries) return; pci_disable_msix(adapter->pdev); kfree(adapter->msix_entries); adapter->msix_entries = NULL; } /** * iavf_init_interrupt_scheme - Determine if MSIX is supported and init * @adapter: board private structure to initialize * **/ static int iavf_init_interrupt_scheme(struct iavf_adapter *adapter) { int err; err = iavf_alloc_queues(adapter); if (err) { dev_err(&adapter->pdev->dev, "Unable to allocate memory for queues\n"); goto err_alloc_queues; } err = iavf_set_interrupt_capability(adapter); if (err) { dev_err(&adapter->pdev->dev, "Unable to setup interrupt capabilities\n"); goto err_set_interrupt; } err = iavf_alloc_q_vectors(adapter); if (err) { dev_err(&adapter->pdev->dev, "Unable to allocate memory for queue vectors\n"); goto err_alloc_q_vectors; } /* If we've made it so far while ADq flag being ON, then we haven't * bailed out anywhere in middle. And ADq isn't just enabled but actual * resources have been allocated in the reset path. * Now we can truly claim that ADq is enabled. */ if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && adapter->num_tc) dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created", adapter->num_tc); dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", (adapter->num_active_queues > 1) ? 
"Enabled" : "Disabled", adapter->num_active_queues); return 0; err_alloc_q_vectors: iavf_reset_interrupt_capability(adapter); err_set_interrupt: iavf_free_queues(adapter); err_alloc_queues: return err; } /** * iavf_free_rss - Free memory used by RSS structs * @adapter: board private structure **/ static void iavf_free_rss(struct iavf_adapter *adapter) { kfree(adapter->rss_key); adapter->rss_key = NULL; kfree(adapter->rss_lut); adapter->rss_lut = NULL; } /** * iavf_reinit_interrupt_scheme - Reallocate queues and vectors * @adapter: board private structure * @running: true if adapter->state == __IAVF_RUNNING * * Returns 0 on success, negative on failure **/ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter, bool running) { struct net_device *netdev = adapter->netdev; int err; if (running) iavf_free_traffic_irqs(adapter); iavf_free_misc_irq(adapter); iavf_reset_interrupt_capability(adapter); iavf_free_q_vectors(adapter); iavf_free_queues(adapter); err = iavf_init_interrupt_scheme(adapter); if (err) goto err; netif_tx_stop_all_queues(netdev); err = iavf_request_misc_irq(adapter); if (err) goto err; set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); iavf_map_rings_to_vectors(adapter); err: return err; } /** * iavf_finish_config - do all netdev work that needs RTNL * @work: our work_struct * * Do work that needs both RTNL and crit_lock. **/ static void iavf_finish_config(struct work_struct *work) { struct iavf_adapter *adapter; int pairs, err; adapter = container_of(work, struct iavf_adapter, finish_config); /* Always take RTNL first to prevent circular lock dependency */ rtnl_lock(); mutex_lock(&adapter->crit_lock); if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) && adapter->netdev_registered && !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { netdev_update_features(adapter->netdev); adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; } switch (adapter->state) { case __IAVF_DOWN: if (!adapter->netdev_registered) { err = register_netdevice(adapter->netdev); if (err) { dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n", err); /* go back and try again.*/ iavf_free_rss(adapter); iavf_free_misc_irq(adapter); iavf_reset_interrupt_capability(adapter); iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER); goto out; } adapter->netdev_registered = true; } /* Set the real number of queues when reset occurs while * state == __IAVF_DOWN */ fallthrough; case __IAVF_RUNNING: pairs = adapter->num_active_queues; netif_set_real_num_rx_queues(adapter->netdev, pairs); netif_set_real_num_tx_queues(adapter->netdev, pairs); break; default: break; } out: mutex_unlock(&adapter->crit_lock); rtnl_unlock(); } /** * iavf_schedule_finish_config - Set the flags and schedule a reset event * @adapter: board private structure **/ void iavf_schedule_finish_config(struct iavf_adapter *adapter) { if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) queue_work(adapter->wq, &adapter->finish_config); } /** * iavf_process_aq_command - process aq_required flags * and sends aq command * @adapter: pointer to iavf adapter structure * * Returns 0 on success * Returns error code if no command was sent * or error code if the command failed. 
**/ static int iavf_process_aq_command(struct iavf_adapter *adapter) { if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) return iavf_send_vf_config_msg(adapter); if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS) return iavf_send_vf_offload_vlan_v2_msg(adapter); if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { iavf_disable_queues(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) { iavf_map_queues(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) { iavf_add_ether_addrs(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) { iavf_add_vlans(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) { iavf_del_ether_addrs(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) { iavf_del_vlans(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { iavf_enable_vlan_stripping(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { iavf_disable_vlan_stripping(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) { iavf_configure_queues(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) { iavf_enable_queues(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) { /* This message goes straight to the firmware, not the * PF, so we don't have to set current_op as we will * not get a response through the ARQ. */ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS; return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) { iavf_get_hena(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) { iavf_set_hena(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) { iavf_set_rss_key(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) { iavf_set_rss_lut(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) { iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) { iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); return 0; } if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) || (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { iavf_set_promiscuous(adapter, 0); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) { iavf_enable_channels(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) { iavf_disable_channels(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { iavf_add_cloud_filter(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { iavf_del_cloud_filter(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { iavf_del_cloud_filter(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { iavf_add_cloud_filter(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) { iavf_add_fdir_filter(adapter); return IAVF_SUCCESS; } if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) { iavf_del_fdir_filter(adapter); return IAVF_SUCCESS; } if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) { iavf_add_adv_rss_cfg(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) { iavf_del_adv_rss_cfg(adapter); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) { iavf_disable_vlan_stripping_v2(adapter, 
ETH_P_8021Q); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) { iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) { iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) { iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) { iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) { iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) { iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) { iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD); return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) { iavf_request_stats(adapter); return 0; } return -EAGAIN; } /** * iavf_set_vlan_offload_features - set VLAN offload configuration * @adapter: board private structure * @prev_features: previous features used for comparison * @features: updated features used for configuration * * Set the aq_required bit(s) based on the requested features passed in to * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule * the watchdog if any changes are requested to expedite the request via * virtchnl. **/ static void iavf_set_vlan_offload_features(struct iavf_adapter *adapter, netdev_features_t prev_features, netdev_features_t features) { bool enable_stripping = true, enable_insertion = true; u16 vlan_ethertype = 0; u64 aq_required = 0; /* keep cases separate because one ethertype for offloads can be * disabled at the same time as another is disabled, so check for an * enabled ethertype first, then check for disabled. Default to * ETH_P_8021Q so an ethertype is specified if disabling insertion and * stripping. */ if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) vlan_ethertype = ETH_P_8021AD; else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) vlan_ethertype = ETH_P_8021Q; else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) vlan_ethertype = ETH_P_8021AD; else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) vlan_ethertype = ETH_P_8021Q; else vlan_ethertype = ETH_P_8021Q; if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX))) enable_stripping = false; if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX))) enable_insertion = false; if (VLAN_ALLOWED(adapter)) { /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN * stripping via virtchnl. 
VLAN insertion can be toggled on the * netdev, but it doesn't require a virtchnl message */ if (enable_stripping) aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; else aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; } else if (VLAN_V2_ALLOWED(adapter)) { switch (vlan_ethertype) { case ETH_P_8021Q: if (enable_stripping) aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING; else aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING; if (enable_insertion) aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION; else aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION; break; case ETH_P_8021AD: if (enable_stripping) aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING; else aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING; if (enable_insertion) aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION; else aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION; break; } } if (aq_required) { adapter->aq_required |= aq_required; mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } } /** * iavf_startup - first step of driver startup * @adapter: board private structure * * Function process __IAVF_STARTUP driver state. * When success the state is changed to __IAVF_INIT_VERSION_CHECK * when fails the state is changed to __IAVF_INIT_FAILED **/ static void iavf_startup(struct iavf_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; enum iavf_status status; int ret; WARN_ON(adapter->state != __IAVF_STARTUP); /* driver loaded, probe complete */ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; adapter->flags &= ~IAVF_FLAG_RESET_PENDING; status = iavf_set_mac_type(hw); if (status) { dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status); goto err; } ret = iavf_check_reset_complete(hw); if (ret) { dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", ret); goto err; } hw->aq.num_arq_entries = IAVF_AQ_LEN; hw->aq.num_asq_entries = IAVF_AQ_LEN; hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; status = iavf_init_adminq(hw); if (status) { dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", status); goto err; } ret = iavf_send_api_ver(adapter); if (ret) { dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret); iavf_shutdown_adminq(hw); goto err; } iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK); return; err: iavf_change_state(adapter, __IAVF_INIT_FAILED); } /** * iavf_init_version_check - second step of driver startup * @adapter: board private structure * * Function process __IAVF_INIT_VERSION_CHECK driver state. 
* When success the state is changed to __IAVF_INIT_GET_RESOURCES * when fails the state is changed to __IAVF_INIT_FAILED **/ static void iavf_init_version_check(struct iavf_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; int err = -EAGAIN; WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK); if (!iavf_asq_done(hw)) { dev_err(&pdev->dev, "Admin queue command never completed\n"); iavf_shutdown_adminq(hw); iavf_change_state(adapter, __IAVF_STARTUP); goto err; } /* aq msg sent, awaiting reply */ err = iavf_verify_api_ver(adapter); if (err) { if (err == -EALREADY) err = iavf_send_api_ver(adapter); else dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", adapter->pf_version.major, adapter->pf_version.minor, VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR); goto err; } err = iavf_send_vf_config_msg(adapter); if (err) { dev_err(&pdev->dev, "Unable to send config request (%d)\n", err); goto err; } iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES); return; err: iavf_change_state(adapter, __IAVF_INIT_FAILED); } /** * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES * @adapter: board private structure */ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter) { int i, num_req_queues = adapter->num_req_queues; struct iavf_vsi *vsi = &adapter->vsi; for (i = 0; i < adapter->vf_res->num_vsis; i++) { if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) adapter->vsi_res = &adapter->vf_res->vsi_res[i]; } if (!adapter->vsi_res) { dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); return -ENODEV; } if (num_req_queues && num_req_queues > adapter->vsi_res->num_queue_pairs) { /* Problem. The PF gave us fewer queues than what we had * negotiated in our request. Need a reset to see if we can't * get back to a working state. */ dev_err(&adapter->pdev->dev, "Requested %d queues, but PF only gave us %d.\n", num_req_queues, adapter->vsi_res->num_queue_pairs); adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED; adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); return -EAGAIN; } adapter->num_req_queues = 0; adapter->vsi.id = adapter->vsi_res->vsi_id; adapter->vsi.back = adapter; adapter->vsi.base_vector = 1; vsi->netdev = adapter->netdev; vsi->qs_handle = adapter->vsi_res->qset_handle; if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { adapter->rss_key_size = adapter->vf_res->rss_key_size; adapter->rss_lut_size = adapter->vf_res->rss_lut_size; } else { adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; } return 0; } /** * iavf_init_get_resources - third step of driver startup * @adapter: board private structure * * Function process __IAVF_INIT_GET_RESOURCES driver state and * finishes driver initialization procedure. 
* When success the state is changed to __IAVF_DOWN * when fails the state is changed to __IAVF_INIT_FAILED **/ static void iavf_init_get_resources(struct iavf_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; int err; WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); /* aq msg sent, awaiting reply */ if (!adapter->vf_res) { adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE, GFP_KERNEL); if (!adapter->vf_res) { err = -ENOMEM; goto err; } } err = iavf_get_vf_config(adapter); if (err == -EALREADY) { err = iavf_send_vf_config_msg(adapter); goto err; } else if (err == -EINVAL) { /* We only get -EINVAL if the device is in a very bad * state or if we've been disabled for previous bad * behavior. Either way, we're done now. */ iavf_shutdown_adminq(hw); dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); return; } if (err) { dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); goto err_alloc; } err = iavf_parse_vf_resource_msg(adapter); if (err) { dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n", err); goto err_alloc; } /* Some features require additional messages to negotiate extended * capabilities. These are processed in sequence by the * __IAVF_INIT_EXTENDED_CAPS driver state. */ adapter->extended_caps = IAVF_EXTENDED_CAPS; iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS); return; err_alloc: kfree(adapter->vf_res); adapter->vf_res = NULL; err: iavf_change_state(adapter, __IAVF_INIT_FAILED); } /** * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps * @adapter: board private structure * * Function processes send of the extended VLAN V2 capability message to the * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent, * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2. */ static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter) { int ret; WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2)); ret = iavf_send_vf_offload_vlan_v2_msg(adapter); if (ret && ret == -EOPNOTSUPP) { /* PF does not support VIRTCHNL_VF_OFFLOAD_V2. In this case, * we did not send the capability exchange message and do not * expect a response. */ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2; } /* We sent the message, so move on to the next step */ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2; } /** * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps * @adapter: board private structure * * Function processes receipt of the extended VLAN V2 capability message from * the PF. **/ static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter) { int ret; WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2)); memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps)); ret = iavf_get_vf_vlan_v2_caps(adapter); if (ret) goto err; /* We've processed receipt of the VLAN V2 caps message */ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2; return; err: /* We didn't receive a reply. Make sure we try sending again when * __IAVF_INIT_FAILED attempts to recover. */ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2; iavf_change_state(adapter, __IAVF_INIT_FAILED); } /** * iavf_init_process_extended_caps - Part of driver startup * @adapter: board private structure * * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state * handles negotiating capabilities for features which require an additional * message. 
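 *
 * A later feature that needs its own negotiation round would follow the
 * same shape (hypothetical flag and helper names, shown only to
 * illustrate the pattern):
 *
 *	if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_NEW_FEATURE)
 *		iavf_init_send_new_feature_caps(adapter);
 *	else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_NEW_FEATURE)
 *		iavf_init_recv_new_feature_caps(adapter);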
* * Once all extended capabilities exchanges are finished, the driver will * transition into __IAVF_INIT_CONFIG_ADAPTER. */ static void iavf_init_process_extended_caps(struct iavf_adapter *adapter) { WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS); /* Process capability exchange for VLAN V2 */ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) { iavf_init_send_offload_vlan_v2_caps(adapter); return; } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) { iavf_init_recv_offload_vlan_v2_caps(adapter); return; } /* When we reach here, no further extended capabilities exchanges are * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER */ iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER); } /** * iavf_init_config_adapter - last part of driver startup * @adapter: board private structure * * After all the supported capabilities are negotiated, then the * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization. */ static void iavf_init_config_adapter(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; int err; WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER); if (iavf_process_config(adapter)) goto err; adapter->current_op = VIRTCHNL_OP_UNKNOWN; adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; netdev->netdev_ops = &iavf_netdev_ops; iavf_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; /* MTU range: 68 - 9710 */ netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; if (!is_valid_ether_addr(adapter->hw.mac.addr)) { dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", adapter->hw.mac.addr); eth_hw_addr_random(netdev); ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); } else { eth_hw_addr_set(netdev, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } adapter->tx_desc_count = IAVF_DEFAULT_TXD; adapter->rx_desc_count = IAVF_DEFAULT_RXD; err = iavf_init_interrupt_scheme(adapter); if (err) goto err_sw_init; iavf_map_rings_to_vectors(adapter); if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; err = iavf_request_misc_irq(adapter); if (err) goto err_sw_init; netif_carrier_off(netdev); adapter->link_up = false; netif_tx_stop_all_queues(netdev); if (CLIENT_ALLOWED(adapter)) { err = iavf_lan_add_device(adapter); if (err) dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", err); } dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); if (netdev->features & NETIF_F_GRO) dev_info(&pdev->dev, "GRO is enabled\n"); iavf_change_state(adapter, __IAVF_DOWN); set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); iavf_misc_irq_enable(adapter); wake_up(&adapter->down_waitqueue); adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); if (!adapter->rss_key || !adapter->rss_lut) { err = -ENOMEM; goto err_mem; } if (RSS_AQ(adapter)) adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; else iavf_init_rss(adapter); if (VLAN_V2_ALLOWED(adapter)) /* request initial VLAN offload settings */ iavf_set_vlan_offload_features(adapter, 0, netdev->features); iavf_schedule_finish_config(adapter); return; err_mem: iavf_free_rss(adapter); iavf_free_misc_irq(adapter); err_sw_init: iavf_reset_interrupt_capability(adapter); err: iavf_change_state(adapter, __IAVF_INIT_FAILED); } /** * iavf_watchdog_task - Periodic call-back task * @work: pointer to work_struct **/ static 
void iavf_watchdog_task(struct work_struct *work) { struct iavf_adapter *adapter = container_of(work, struct iavf_adapter, watchdog_task.work); struct iavf_hw *hw = &adapter->hw; u32 reg_val; if (!mutex_trylock(&adapter->crit_lock)) { if (adapter->state == __IAVF_REMOVE) return; goto restart_watchdog; } if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) iavf_change_state(adapter, __IAVF_COMM_FAILED); switch (adapter->state) { case __IAVF_STARTUP: iavf_startup(adapter); mutex_unlock(&adapter->crit_lock); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(30)); return; case __IAVF_INIT_VERSION_CHECK: iavf_init_version_check(adapter); mutex_unlock(&adapter->crit_lock); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(30)); return; case __IAVF_INIT_GET_RESOURCES: iavf_init_get_resources(adapter); mutex_unlock(&adapter->crit_lock); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; case __IAVF_INIT_EXTENDED_CAPS: iavf_init_process_extended_caps(adapter); mutex_unlock(&adapter->crit_lock); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; case __IAVF_INIT_CONFIG_ADAPTER: iavf_init_config_adapter(adapter); mutex_unlock(&adapter->crit_lock); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; case __IAVF_INIT_FAILED: if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { /* Do not update the state and do not reschedule * watchdog task, iavf_remove should handle this state * as it can loop forever */ mutex_unlock(&adapter->crit_lock); return; } if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { dev_err(&adapter->pdev->dev, "Failed to communicate with PF; waiting before retry\n"); adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; iavf_shutdown_adminq(hw); mutex_unlock(&adapter->crit_lock); queue_delayed_work(adapter->wq, &adapter->watchdog_task, (5 * HZ)); return; } /* Try again from failed step*/ iavf_change_state(adapter, adapter->last_state); mutex_unlock(&adapter->crit_lock); queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ); return; case __IAVF_COMM_FAILED: if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { /* Set state to __IAVF_INIT_FAILED and perform remove * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task * doesn't bring the state back to __IAVF_COMM_FAILED. */ iavf_change_state(adapter, __IAVF_INIT_FAILED); adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; mutex_unlock(&adapter->crit_lock); return; } reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if (reg_val == VIRTCHNL_VFR_VFACTIVE || reg_val == VIRTCHNL_VFR_COMPLETED) { /* A chance for redemption! */ dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); /* When init task contacts the PF and * gets everything set up again, it'll restart the * watchdog for us. Down, boy. Sit. Stay. Woof. 
*/ iavf_change_state(adapter, __IAVF_STARTUP); adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; } adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; mutex_unlock(&adapter->crit_lock); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(10)); return; case __IAVF_RESETTING: mutex_unlock(&adapter->crit_lock); queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ * 2); return; case __IAVF_DOWN: case __IAVF_DOWN_PENDING: case __IAVF_TESTING: case __IAVF_RUNNING: if (adapter->current_op) { if (!iavf_asq_done(hw)) { dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n"); iavf_send_api_ver(adapter); } } else { int ret = iavf_process_aq_command(adapter); /* An error will be returned if no commands were * processed; use this opportunity to update stats * if the error isn't -ENOTSUPP */ if (ret && ret != -EOPNOTSUPP && adapter->state == __IAVF_RUNNING) iavf_request_stats(adapter); } if (adapter->state == __IAVF_RUNNING) iavf_detect_recover_hung(&adapter->vsi); break; case __IAVF_REMOVE: default: mutex_unlock(&adapter->crit_lock); return; } /* check for hw reset */ reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; if (!reg_val) { adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING); mutex_unlock(&adapter->crit_lock); queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ * 2); return; } schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); mutex_unlock(&adapter->crit_lock); restart_watchdog: if (adapter->state >= __IAVF_DOWN) queue_work(adapter->wq, &adapter->adminq_task); if (adapter->aq_required) queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(20)); else queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ * 2); } /** * iavf_disable_vf - disable VF * @adapter: board private structure * * Set communication failed flag and free all resources. * NOTE: This function is expected to be called with crit_lock being held. **/ static void iavf_disable_vf(struct iavf_adapter *adapter) { struct iavf_mac_filter *f, *ftmp; struct iavf_vlan_filter *fv, *fvtmp; struct iavf_cloud_filter *cf, *cftmp; adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; /* We don't use netif_running() because it may be true prior to * ndo_open() returning, so we can't assume it means all our open * tasks have finished, since we're not holding the rtnl_lock here. 
*/ if (adapter->state == __IAVF_RUNNING) { set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); netif_carrier_off(adapter->netdev); netif_tx_disable(adapter->netdev); adapter->link_up = false; iavf_napi_disable_all(adapter); iavf_irq_disable(adapter); iavf_free_traffic_irqs(adapter); iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); } spin_lock_bh(&adapter->mac_vlan_list_lock); /* Delete all of the filters */ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { list_del(&f->list); kfree(f); } list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { list_del(&fv->list); kfree(fv); } adapter->num_vlan_filters = 0; spin_unlock_bh(&adapter->mac_vlan_list_lock); spin_lock_bh(&adapter->cloud_filter_list_lock); list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { list_del(&cf->list); kfree(cf); adapter->num_cloud_filters--; } spin_unlock_bh(&adapter->cloud_filter_list_lock); iavf_free_misc_irq(adapter); iavf_reset_interrupt_capability(adapter); iavf_free_q_vectors(adapter); iavf_free_queues(adapter); memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); iavf_shutdown_adminq(&adapter->hw); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); } /** * iavf_reset_task - Call-back task to handle hardware reset * @work: pointer to work_struct * * During reset we need to shut down and reinitialize the admin queue * before we can use it to communicate with the PF again. We also clear * and reinit the rings because that context is lost as well. **/ static void iavf_reset_task(struct work_struct *work) { struct iavf_adapter *adapter = container_of(work, struct iavf_adapter, reset_task); struct virtchnl_vf_resource *vfres = adapter->vf_res; struct net_device *netdev = adapter->netdev; struct iavf_hw *hw = &adapter->hw; struct iavf_mac_filter *f, *ftmp; struct iavf_cloud_filter *cf; enum iavf_status status; u32 reg_val; int i = 0, err; bool running; /* When device is being removed it doesn't make sense to run the reset * task, just return in such a case. */ if (!mutex_trylock(&adapter->crit_lock)) { if (adapter->state != __IAVF_REMOVE) queue_work(adapter->wq, &adapter->reset_task); return; } while (!mutex_trylock(&adapter->client_lock)) usleep_range(500, 1000); if (CLIENT_ENABLED(adapter)) { adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | IAVF_FLAG_CLIENT_NEEDS_CLOSE | IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | IAVF_FLAG_SERVICE_CLIENT_REQUESTED); cancel_delayed_work_sync(&adapter->client_task); iavf_notify_client_close(&adapter->vsi, true); } iavf_misc_irq_disable(adapter); if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; /* Restart the AQ here. If we have been reset but didn't * detect it, or if the PF had to reinit, our AQ will be hosed. 
*/ iavf_shutdown_adminq(hw); iavf_init_adminq(hw); iavf_request_reset(adapter); } adapter->flags |= IAVF_FLAG_RESET_PENDING; /* poll until we see the reset actually happen */ for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) { reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; if (!reg_val) break; usleep_range(5000, 10000); } if (i == IAVF_RESET_WAIT_DETECTED_COUNT) { dev_info(&adapter->pdev->dev, "Never saw reset\n"); goto continue_reset; /* act like the reset happened */ } /* wait until the reset is complete and the PF is responding to us */ for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { /* sleep first to make sure a minimum wait time is met */ msleep(IAVF_RESET_WAIT_MS); reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if (reg_val == VIRTCHNL_VFR_VFACTIVE) break; } pci_set_master(adapter->pdev); pci_restore_msi_state(adapter->pdev); if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", reg_val); iavf_disable_vf(adapter); mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); return; /* Do not attempt to reinit. It's dead, Jim. */ } continue_reset: /* We don't use netif_running() because it may be true prior to * ndo_open() returning, so we can't assume it means all our open * tasks have finished, since we're not holding the rtnl_lock here. */ running = adapter->state == __IAVF_RUNNING; if (running) { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); adapter->link_up = false; iavf_napi_disable_all(adapter); } iavf_irq_disable(adapter); iavf_change_state(adapter, __IAVF_RESETTING); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; /* free the Tx/Rx rings and descriptors, might be better to just * re-use them sometime in the future */ iavf_free_all_rx_resources(adapter); iavf_free_all_tx_resources(adapter); adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; /* kill and reinit the admin queue */ iavf_shutdown_adminq(hw); adapter->current_op = VIRTCHNL_OP_UNKNOWN; status = iavf_init_adminq(hw); if (status) { dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", status); goto reset_err; } adapter->aq_required = 0; if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) || (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) { err = iavf_reinit_interrupt_scheme(adapter, running); if (err) goto reset_err; } if (RSS_AQ(adapter)) { adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; } else { err = iavf_init_rss(adapter); if (err) goto reset_err; } adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been * sent/received yet, so VLAN_V2_ALLOWED() cannot is not reliable here, * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have * been successfully sent and negotiated */ adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS; adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; spin_lock_bh(&adapter->mac_vlan_list_lock); /* Delete filter for the current MAC address, it could have * been changed by the PF via administratively set MAC. * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES. 
*/ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) { list_del(&f->list); kfree(f); } } /* re-add all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { f->add = true; } spin_unlock_bh(&adapter->mac_vlan_list_lock); /* check if TCs are running and re-add all cloud filters */ spin_lock_bh(&adapter->cloud_filter_list_lock); if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && adapter->num_tc) { list_for_each_entry(cf, &adapter->cloud_filter_list, list) { cf->add = true; } } spin_unlock_bh(&adapter->cloud_filter_list_lock); adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; iavf_misc_irq_enable(adapter); mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2); /* We were running when the reset started, so we need to restore some * state here. */ if (running) { /* allocate transmit descriptors */ err = iavf_setup_all_tx_resources(adapter); if (err) goto reset_err; /* allocate receive descriptors */ err = iavf_setup_all_rx_resources(adapter); if (err) goto reset_err; if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) || (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) { err = iavf_request_traffic_irqs(adapter, netdev->name); if (err) goto reset_err; adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED; } iavf_configure(adapter); /* iavf_up_complete() will switch device back * to __IAVF_RUNNING */ iavf_up_complete(adapter); iavf_irq_enable(adapter, true); } else { iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); } adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; wake_up(&adapter->reset_waitqueue); mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); return; reset_err: if (running) { set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); iavf_free_traffic_irqs(adapter); } iavf_disable_vf(adapter); mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); } /** * iavf_adminq_task - worker thread to clean the admin queue * @work: pointer to work_struct containing our data **/ static void iavf_adminq_task(struct work_struct *work) { struct iavf_adapter *adapter = container_of(work, struct iavf_adapter, adminq_task); struct iavf_hw *hw = &adapter->hw; struct iavf_arq_event_info event; enum virtchnl_ops v_op; enum iavf_status ret, v_ret; u32 val, oldval; u16 pending; if (!mutex_trylock(&adapter->crit_lock)) { if (adapter->state == __IAVF_REMOVE) return; queue_work(adapter->wq, &adapter->adminq_task); goto out; } if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) goto unlock; event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) goto unlock; do { ret = iavf_clean_arq_element(hw, &event, &pending); v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); if (ret || !v_op) break; /* No event to process or error cleaning ARQ */ iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, event.msg_len); if (pending != 0) memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); } while (pending); if (iavf_is_reset_in_progress(adapter)) goto freedom; /* check for error indications */ val = rd32(hw, hw->aq.arq.len); if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */ goto freedom; oldval = val; if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); val &= 
~IAVF_VF_ARQLEN1_ARQVFE_MASK; } if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; } if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; } if (oldval != val) wr32(hw, hw->aq.arq.len, val); val = rd32(hw, hw->aq.asq.len); oldval = val; if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; } if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; } if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; } if (oldval != val) wr32(hw, hw->aq.asq.len, val); freedom: kfree(event.msg_buf); unlock: mutex_unlock(&adapter->crit_lock); out: /* re-enable Admin queue interrupt cause */ iavf_misc_irq_enable(adapter); } /** * iavf_client_task - worker thread to perform client work * @work: pointer to work_struct containing our data * * This task handles client interactions. Because client calls can be * reentrant, we can't handle them in the watchdog. **/ static void iavf_client_task(struct work_struct *work) { struct iavf_adapter *adapter = container_of(work, struct iavf_adapter, client_task.work); /* If we can't get the client bit, just give up. We'll be rescheduled * later. */ if (!mutex_trylock(&adapter->client_lock)) return; if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { iavf_client_subtask(adapter); adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED; goto out; } if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { iavf_notify_client_l2_params(&adapter->vsi); adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; goto out; } if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) { iavf_notify_client_close(&adapter->vsi, false); adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE; goto out; } if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) { iavf_notify_client_open(&adapter->vsi); adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; } out: mutex_unlock(&adapter->client_lock); } /** * iavf_free_all_tx_resources - Free Tx Resources for All Queues * @adapter: board private structure * * Free all transmit software resources **/ void iavf_free_all_tx_resources(struct iavf_adapter *adapter) { int i; if (!adapter->tx_rings) return; for (i = 0; i < adapter->num_active_queues; i++) if (adapter->tx_rings[i].desc) iavf_free_tx_resources(&adapter->tx_rings[i]); } /** * iavf_setup_all_tx_resources - allocate all queues Tx resources * @adapter: board private structure * * If this function returns with an error, then it's possible one or * more of the rings is populated (while the rest are not). It is the * callers duty to clean those orphaned rings. 
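 *
 * A hedged caller-side sketch of that contract (illustrative, not lifted
 * from this driver):
 *
 *	err = iavf_setup_all_tx_resources(adapter);
 *	if (err)
 *		iavf_free_all_tx_resources(adapter);
 *
 * iavf_free_all_tx_resources() only frees rings whose descriptor memory
 * was actually allocated, so calling it on a partially populated set is
 * safe.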
* * Return 0 on success, negative on failure **/ static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter) { int i, err = 0; for (i = 0; i < adapter->num_active_queues; i++) { adapter->tx_rings[i].count = adapter->tx_desc_count; err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]); if (!err) continue; dev_err(&adapter->pdev->dev, "Allocation for Tx Queue %u failed\n", i); break; } return err; } /** * iavf_setup_all_rx_resources - allocate all queues Rx resources * @adapter: board private structure * * If this function returns with an error, then it's possible one or * more of the rings is populated (while the rest are not). It is the * callers duty to clean those orphaned rings. * * Return 0 on success, negative on failure **/ static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) { int i, err = 0; for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i].count = adapter->rx_desc_count; err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); if (!err) continue; dev_err(&adapter->pdev->dev, "Allocation for Rx Queue %u failed\n", i); break; } return err; } /** * iavf_free_all_rx_resources - Free Rx Resources for All Queues * @adapter: board private structure * * Free all receive software resources **/ void iavf_free_all_rx_resources(struct iavf_adapter *adapter) { int i; if (!adapter->rx_rings) return; for (i = 0; i < adapter->num_active_queues; i++) if (adapter->rx_rings[i].desc) iavf_free_rx_resources(&adapter->rx_rings[i]); } /** * iavf_validate_tx_bandwidth - validate the max Tx bandwidth * @adapter: board private structure * @max_tx_rate: max Tx bw for a tc **/ static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, u64 max_tx_rate) { int speed = 0, ret = 0; if (ADV_LINK_SUPPORT(adapter)) { if (adapter->link_speed_mbps < U32_MAX) { speed = adapter->link_speed_mbps; goto validate_bw; } else { dev_err(&adapter->pdev->dev, "Unknown link speed\n"); return -EINVAL; } } switch (adapter->link_speed) { case VIRTCHNL_LINK_SPEED_40GB: speed = SPEED_40000; break; case VIRTCHNL_LINK_SPEED_25GB: speed = SPEED_25000; break; case VIRTCHNL_LINK_SPEED_20GB: speed = SPEED_20000; break; case VIRTCHNL_LINK_SPEED_10GB: speed = SPEED_10000; break; case VIRTCHNL_LINK_SPEED_5GB: speed = SPEED_5000; break; case VIRTCHNL_LINK_SPEED_2_5GB: speed = SPEED_2500; break; case VIRTCHNL_LINK_SPEED_1GB: speed = SPEED_1000; break; case VIRTCHNL_LINK_SPEED_100MB: speed = SPEED_100; break; default: break; } validate_bw: if (max_tx_rate > speed) { dev_err(&adapter->pdev->dev, "Invalid tx rate specified\n"); ret = -EINVAL; } return ret; } /** * iavf_validate_ch_config - validate queue mapping info * @adapter: board private structure * @mqprio_qopt: queue parameters * * This function validates if the config provided by the user to * configure queue channels is valid or not. Returns 0 on a valid * config. 
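 *
 * Worked example (assuming IAVF_MBPS_DIVISOR == 125000, i.e. bytes/s to
 * Mbps, and IAVF_MBPS_QUANTA == 50): an mqprio max_rate of 6250000
 * bytes/s converts to 50 Mbps and passes both the minimum-rate and the
 * divisibility checks below, while 7500000 bytes/s (60 Mbps) fails the
 * "multiple of the quanta" check.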
**/ static int iavf_validate_ch_config(struct iavf_adapter *adapter, struct tc_mqprio_qopt_offload *mqprio_qopt) { u64 total_max_rate = 0; u32 tx_rate_rem = 0; int i, num_qps = 0; u64 tx_rate = 0; int ret = 0; if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || mqprio_qopt->qopt.num_tc < 1) return -EINVAL; for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { if (!mqprio_qopt->qopt.count[i] || mqprio_qopt->qopt.offset[i] != num_qps) return -EINVAL; if (mqprio_qopt->min_rate[i]) { dev_err(&adapter->pdev->dev, "Invalid min tx rate (greater than 0) specified for TC%d\n", i); return -EINVAL; } /* convert to Mbps */ tx_rate = div_u64(mqprio_qopt->max_rate[i], IAVF_MBPS_DIVISOR); if (mqprio_qopt->max_rate[i] && tx_rate < IAVF_MBPS_QUANTA) { dev_err(&adapter->pdev->dev, "Invalid max tx rate for TC%d, minimum %dMbps\n", i, IAVF_MBPS_QUANTA); return -EINVAL; } (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem); if (tx_rate_rem != 0) { dev_err(&adapter->pdev->dev, "Invalid max tx rate for TC%d, not divisible by %d\n", i, IAVF_MBPS_QUANTA); return -EINVAL; } total_max_rate += tx_rate; num_qps += mqprio_qopt->qopt.count[i]; } if (num_qps > adapter->num_active_queues) { dev_err(&adapter->pdev->dev, "Cannot support requested number of queues\n"); return -EINVAL; } ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); return ret; } /** * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes * @adapter: board private structure **/ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) { struct iavf_cloud_filter *cf, *cftmp; spin_lock_bh(&adapter->cloud_filter_list_lock); list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { list_del(&cf->list); kfree(cf); adapter->num_cloud_filters--; } spin_unlock_bh(&adapter->cloud_filter_list_lock); } /** * __iavf_setup_tc - configure multiple traffic classes * @netdev: network interface device structure * @type_data: tc offload data * * This function processes the config information provided by the * user to configure traffic classes/queue channels and packages the * information to request the PF to setup traffic classes. * * Returns 0 on success. 
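 *
 * For context, this path is typically exercised from userspace with an
 * mqprio channel request along these lines (illustrative command; exact
 * syntax depends on the iproute2 version in use):
 *
 *	tc qdisc add dev <vf-netdev> root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 \
 *		hw 1 mode channel max_rate 1Gbit 2Gbit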
**/ static int __iavf_setup_tc(struct net_device *netdev, void *type_data) { struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; struct iavf_adapter *adapter = netdev_priv(netdev); struct virtchnl_vf_resource *vfres = adapter->vf_res; u8 num_tc = 0, total_qps = 0; int ret = 0, netdev_tc = 0; u64 max_tx_rate; u16 mode; int i; num_tc = mqprio_qopt->qopt.num_tc; mode = mqprio_qopt->mode; /* delete queue_channel */ if (!mqprio_qopt->qopt.hw) { if (adapter->ch_config.state == __IAVF_TC_RUNNING) { /* reset the tc configuration */ netdev_reset_tc(netdev); adapter->num_tc = 0; netif_tx_stop_all_queues(netdev); netif_tx_disable(netdev); iavf_del_all_cloud_filters(adapter); adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; total_qps = adapter->orig_num_active_queues; goto exit; } else { return -EINVAL; } } /* add queue channel */ if (mode == TC_MQPRIO_MODE_CHANNEL) { if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { dev_err(&adapter->pdev->dev, "ADq not supported\n"); return -EOPNOTSUPP; } if (adapter->ch_config.state != __IAVF_TC_INVALID) { dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); return -EINVAL; } ret = iavf_validate_ch_config(adapter, mqprio_qopt); if (ret) return ret; /* Return if same TC config is requested */ if (adapter->num_tc == num_tc) return 0; adapter->num_tc = num_tc; for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { if (i < num_tc) { adapter->ch_config.ch_info[i].count = mqprio_qopt->qopt.count[i]; adapter->ch_config.ch_info[i].offset = mqprio_qopt->qopt.offset[i]; total_qps += mqprio_qopt->qopt.count[i]; max_tx_rate = mqprio_qopt->max_rate[i]; /* convert to Mbps */ max_tx_rate = div_u64(max_tx_rate, IAVF_MBPS_DIVISOR); adapter->ch_config.ch_info[i].max_tx_rate = max_tx_rate; } else { adapter->ch_config.ch_info[i].count = 1; adapter->ch_config.ch_info[i].offset = 0; } } /* Take snapshot of original config such as "num_active_queues" * It is used later when delete ADQ flow is exercised, so that * once delete ADQ flow completes, VF shall go back to its * original queue configuration */ adapter->orig_num_active_queues = adapter->num_active_queues; /* Store queue info based on TC so that VF gets configured * with correct number of queues when VF completes ADQ config * flow */ adapter->ch_config.total_qps = total_qps; netif_tx_stop_all_queues(netdev); netif_tx_disable(netdev); adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; netdev_reset_tc(netdev); /* Report the tc mapping up the stack */ netdev_set_num_tc(adapter->netdev, num_tc); for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { u16 qcount = mqprio_qopt->qopt.count[i]; u16 qoffset = mqprio_qopt->qopt.offset[i]; if (i < num_tc) netdev_set_tc_queue(netdev, netdev_tc++, qcount, qoffset); } } exit: if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) return 0; netif_set_real_num_rx_queues(netdev, total_qps); netif_set_real_num_tx_queues(netdev, total_qps); return ret; } /** * iavf_parse_cls_flower - Parse tc flower filters provided by kernel * @adapter: board private structure * @f: pointer to struct flow_cls_offload * @filter: pointer to cloud filter structure */ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, struct flow_cls_offload *f, struct iavf_cloud_filter *filter) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_dissector *dissector = rule->match.dissector; u16 n_proto_mask = 0; u16 n_proto_key = 0; u8 field_flags = 0; u16 addr_type = 0; u16 n_proto = 0; int i = 0; struct virtchnl_filter *vf = &filter->f; if (dissector->used_keys & 
~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) { dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%llx\n", dissector->used_keys); return -EOPNOTSUPP; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { struct flow_match_enc_keyid match; flow_rule_match_enc_keyid(rule, &match); if (match.mask->keyid != 0) field_flags |= IAVF_CLOUD_FIELD_TEN_ID; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; flow_rule_match_basic(rule, &match); n_proto_key = ntohs(match.key->n_proto); n_proto_mask = ntohs(match.mask->n_proto); if (n_proto_key == ETH_P_ALL) { n_proto_key = 0; n_proto_mask = 0; } n_proto = n_proto_key & n_proto_mask; if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) return -EINVAL; if (n_proto == ETH_P_IPV6) { /* specify flow type as TCP IPv6 */ vf->flow_type = VIRTCHNL_TCP_V6_FLOW; } if (match.key->ip_proto != IPPROTO_TCP) { dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); return -EINVAL; } } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_match_eth_addrs match; flow_rule_match_eth_addrs(rule, &match); /* use is_broadcast and is_zero to check for all 0xf or 0 */ if (!is_zero_ether_addr(match.mask->dst)) { if (is_broadcast_ether_addr(match.mask->dst)) { field_flags |= IAVF_CLOUD_FIELD_OMAC; } else { dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", match.mask->dst); return -EINVAL; } } if (!is_zero_ether_addr(match.mask->src)) { if (is_broadcast_ether_addr(match.mask->src)) { field_flags |= IAVF_CLOUD_FIELD_IMAC; } else { dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", match.mask->src); return -EINVAL; } } if (!is_zero_ether_addr(match.key->dst)) if (is_valid_ether_addr(match.key->dst) || is_multicast_ether_addr(match.key->dst)) { /* set the mask if a valid dst_mac address */ for (i = 0; i < ETH_ALEN; i++) vf->mask.tcp_spec.dst_mac[i] |= 0xff; ether_addr_copy(vf->data.tcp_spec.dst_mac, match.key->dst); } if (!is_zero_ether_addr(match.key->src)) if (is_valid_ether_addr(match.key->src) || is_multicast_ether_addr(match.key->src)) { /* set the mask if a valid dst_mac address */ for (i = 0; i < ETH_ALEN; i++) vf->mask.tcp_spec.src_mac[i] |= 0xff; ether_addr_copy(vf->data.tcp_spec.src_mac, match.key->src); } } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { struct flow_match_vlan match; flow_rule_match_vlan(rule, &match); if (match.mask->vlan_id) { if (match.mask->vlan_id == VLAN_VID_MASK) { field_flags |= IAVF_CLOUD_FIELD_IVLAN; } else { dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", match.mask->vlan_id); return -EINVAL; } } vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { struct flow_match_control match; flow_rule_match_control(rule, &match); addr_type = match.key->addr_type; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { struct flow_match_ipv4_addrs match; flow_rule_match_ipv4_addrs(rule, &match); if (match.mask->dst) { if (match.mask->dst == cpu_to_be32(0xffffffff)) { field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", be32_to_cpu(match.mask->dst)); return -EINVAL; } } if (match.mask->src) { if (match.mask->src == 
cpu_to_be32(0xffffffff)) { field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", be32_to_cpu(match.mask->src)); return -EINVAL; } } if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); return -EINVAL; } if (match.key->dst) { vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); vf->data.tcp_spec.dst_ip[0] = match.key->dst; } if (match.key->src) { vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff); vf->data.tcp_spec.src_ip[0] = match.key->src; } } if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { struct flow_match_ipv6_addrs match; flow_rule_match_ipv6_addrs(rule, &match); /* validate mask, make sure it is not IPV6_ADDR_ANY */ if (ipv6_addr_any(&match.mask->dst)) { dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", IPV6_ADDR_ANY); return -EINVAL; } /* src and dest IPv6 address should not be LOOPBACK * (0:0:0:0:0:0:0:1) which can be represented as ::1 */ if (ipv6_addr_loopback(&match.key->dst) || ipv6_addr_loopback(&match.key->src)) { dev_err(&adapter->pdev->dev, "ipv6 addr should not be loopback\n"); return -EINVAL; } if (!ipv6_addr_any(&match.mask->dst) || !ipv6_addr_any(&match.mask->src)) field_flags |= IAVF_CLOUD_FIELD_IIP; for (i = 0; i < 4; i++) vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32, sizeof(vf->data.tcp_spec.dst_ip)); for (i = 0; i < 4; i++) vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff); memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32, sizeof(vf->data.tcp_spec.src_ip)); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports match; flow_rule_match_ports(rule, &match); if (match.mask->src) { if (match.mask->src == cpu_to_be16(0xffff)) { field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", be16_to_cpu(match.mask->src)); return -EINVAL; } } if (match.mask->dst) { if (match.mask->dst == cpu_to_be16(0xffff)) { field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", be16_to_cpu(match.mask->dst)); return -EINVAL; } } if (match.key->dst) { vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff); vf->data.tcp_spec.dst_port = match.key->dst; } if (match.key->src) { vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff); vf->data.tcp_spec.src_port = match.key->src; } } vf->field_flags = field_flags; return 0; } /** * iavf_handle_tclass - Forward to a traffic class on the device * @adapter: board private structure * @tc: traffic class index on the device * @filter: pointer to cloud filter structure */ static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, struct iavf_cloud_filter *filter) { if (tc == 0) return 0; if (tc < adapter->num_tc) { if (!filter->f.data.tcp_spec.dst_port) { dev_err(&adapter->pdev->dev, "Specify destination port to redirect to traffic class other than TC0\n"); return -EINVAL; } } /* redirect to a traffic class on the same device */ filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; filter->f.action_meta = tc; return 0; } /** * iavf_find_cf - Find the cloud filter in the list * @adapter: Board private structure * @cookie: filter specific cookie * * Returns ptr to the filter object or NULL. Must be called while holding the * cloud_filter_list_lock. 
*/ static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, unsigned long *cookie) { struct iavf_cloud_filter *filter = NULL; if (!cookie) return NULL; list_for_each_entry(filter, &adapter->cloud_filter_list, list) { if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) return filter; } return NULL; } /** * iavf_configure_clsflower - Add tc flower filters * @adapter: board private structure * @cls_flower: Pointer to struct flow_cls_offload */ static int iavf_configure_clsflower(struct iavf_adapter *adapter, struct flow_cls_offload *cls_flower) { int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); struct iavf_cloud_filter *filter = NULL; int err = -EINVAL, count = 50; if (tc < 0) { dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); return -EINVAL; } filter = kzalloc(sizeof(*filter), GFP_KERNEL); if (!filter) return -ENOMEM; while (!mutex_trylock(&adapter->crit_lock)) { if (--count == 0) { kfree(filter); return err; } udelay(1); } filter->cookie = cls_flower->cookie; /* bail out here if filter already exists */ spin_lock_bh(&adapter->cloud_filter_list_lock); if (iavf_find_cf(adapter, &cls_flower->cookie)) { dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n"); err = -EEXIST; goto spin_unlock; } spin_unlock_bh(&adapter->cloud_filter_list_lock); /* set the mask to all zeroes to begin with */ memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); /* start out with flow type and eth type IPv4 to begin with */ filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; err = iavf_parse_cls_flower(adapter, cls_flower, filter); if (err) goto err; err = iavf_handle_tclass(adapter, tc, filter); if (err) goto err; /* add filter to the list */ spin_lock_bh(&adapter->cloud_filter_list_lock); list_add_tail(&filter->list, &adapter->cloud_filter_list); adapter->num_cloud_filters++; filter->add = true; adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; spin_unlock: spin_unlock_bh(&adapter->cloud_filter_list_lock); err: if (err) kfree(filter); mutex_unlock(&adapter->crit_lock); return err; } /** * iavf_delete_clsflower - Remove tc flower filters * @adapter: board private structure * @cls_flower: Pointer to struct flow_cls_offload */ static int iavf_delete_clsflower(struct iavf_adapter *adapter, struct flow_cls_offload *cls_flower) { struct iavf_cloud_filter *filter = NULL; int err = 0; spin_lock_bh(&adapter->cloud_filter_list_lock); filter = iavf_find_cf(adapter, &cls_flower->cookie); if (filter) { filter->del = true; adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; } else { err = -EINVAL; } spin_unlock_bh(&adapter->cloud_filter_list_lock); return err; } /** * iavf_setup_tc_cls_flower - flower classifier offloads * @adapter: board private structure * @cls_flower: pointer to flow_cls_offload struct with flow info */ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, struct flow_cls_offload *cls_flower) { switch (cls_flower->command) { case FLOW_CLS_REPLACE: return iavf_configure_clsflower(adapter, cls_flower); case FLOW_CLS_DESTROY: return iavf_delete_clsflower(adapter, cls_flower); case FLOW_CLS_STATS: return -EOPNOTSUPP; default: return -EOPNOTSUPP; } } /** * iavf_setup_tc_block_cb - block callback for tc * @type: type of offload * @type_data: offload data * @cb_priv: * * This function is the block callback for traffic classes **/ static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { struct iavf_adapter *adapter = cb_priv; if 
(!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) return -EOPNOTSUPP; switch (type) { case TC_SETUP_CLSFLOWER: return iavf_setup_tc_cls_flower(cb_priv, type_data); default: return -EOPNOTSUPP; } } static LIST_HEAD(iavf_block_cb_list); /** * iavf_setup_tc - configure multiple traffic classes * @netdev: network interface device structure * @type: type of offload * @type_data: tc offload data * * This function is the callback to ndo_setup_tc in the * netdev_ops. * * Returns 0 on success **/ static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) { struct iavf_adapter *adapter = netdev_priv(netdev); switch (type) { case TC_SETUP_QDISC_MQPRIO: return __iavf_setup_tc(netdev, type_data); case TC_SETUP_BLOCK: return flow_block_cb_setup_simple(type_data, &iavf_block_cb_list, iavf_setup_tc_block_cb, adapter, adapter, true); default: return -EOPNOTSUPP; } } /** * iavf_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog is started, * and the stack is notified that the interface is ready. **/ static int iavf_open(struct net_device *netdev) { struct iavf_adapter *adapter = netdev_priv(netdev); int err; if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); return -EIO; } while (!mutex_trylock(&adapter->crit_lock)) { /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock * is already taken and iavf_open is called from an upper * device's notifier reacting on NETDEV_REGISTER event. * We have to leave here to avoid dead lock. */ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) return -EBUSY; usleep_range(500, 1000); } if (adapter->state != __IAVF_DOWN) { err = -EBUSY; goto err_unlock; } if (adapter->state == __IAVF_RUNNING && !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) { dev_dbg(&adapter->pdev->dev, "VF is already open.\n"); err = 0; goto err_unlock; } /* allocate transmit descriptors */ err = iavf_setup_all_tx_resources(adapter); if (err) goto err_setup_tx; /* allocate receive descriptors */ err = iavf_setup_all_rx_resources(adapter); if (err) goto err_setup_rx; /* clear any pending interrupts, may auto mask */ err = iavf_request_traffic_irqs(adapter, netdev->name); if (err) goto err_req_irq; spin_lock_bh(&adapter->mac_vlan_list_lock); iavf_add_filter(adapter, adapter->hw.mac.addr); spin_unlock_bh(&adapter->mac_vlan_list_lock); /* Restore VLAN filters that were removed with IFF_DOWN */ iavf_restore_filters(adapter); iavf_configure(adapter); iavf_up_complete(adapter); iavf_irq_enable(adapter, true); mutex_unlock(&adapter->crit_lock); return 0; err_req_irq: iavf_down(adapter); iavf_free_traffic_irqs(adapter); err_setup_rx: iavf_free_all_rx_resources(adapter); err_setup_tx: iavf_free_all_tx_resources(adapter); err_unlock: mutex_unlock(&adapter->crit_lock); return err; } /** * iavf_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the drivers control, but * needs to be disabled. 
All IRQs except vector 0 (reserved for admin queue) * are freed, along with all transmit and receive resources. **/ static int iavf_close(struct net_device *netdev) { struct iavf_adapter *adapter = netdev_priv(netdev); u64 aq_to_restore; int status; mutex_lock(&adapter->crit_lock); if (adapter->state <= __IAVF_DOWN_PENDING) { mutex_unlock(&adapter->crit_lock); return 0; } set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); if (CLIENT_ENABLED(adapter)) adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before * IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl * deadlock with adminq_task() until iavf_close timeouts. We must send * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make * disable queues possible for vf. Give only necessary flags to * iavf_down and save other to set them right before iavf_close() * returns, when IAVF_FLAG_AQ_DISABLE_QUEUES will be already sent and * iavf will be in DOWN state. */ aq_to_restore = adapter->aq_required; adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG; /* Remove flags which we do not want to send after close or we want to * send before disable queues. */ aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG | IAVF_FLAG_AQ_ENABLE_QUEUES | IAVF_FLAG_AQ_CONFIGURE_QUEUES | IAVF_FLAG_AQ_ADD_VLAN_FILTER | IAVF_FLAG_AQ_ADD_MAC_FILTER | IAVF_FLAG_AQ_ADD_CLOUD_FILTER | IAVF_FLAG_AQ_ADD_FDIR_FILTER | IAVF_FLAG_AQ_ADD_ADV_RSS_CFG); iavf_down(adapter); iavf_change_state(adapter, __IAVF_DOWN_PENDING); iavf_free_traffic_irqs(adapter); mutex_unlock(&adapter->crit_lock); /* We explicitly don't free resources here because the hardware is * still active and can DMA into memory. Resources are cleared in * iavf_virtchnl_completion() after we get confirmation from the PF * driver that the rings have been stopped. * * Also, we wait for state to transition to __IAVF_DOWN before * returning. State change occurs in iavf_virtchnl_completion() after * VF resources are released (which occurs after PF driver processes and * responds to admin queue commands). 
*/ status = wait_event_timeout(adapter->down_waitqueue, adapter->state == __IAVF_DOWN, msecs_to_jiffies(500)); if (!status) netdev_warn(netdev, "Device resources not yet released\n"); mutex_lock(&adapter->crit_lock); adapter->aq_required |= aq_to_restore; mutex_unlock(&adapter->crit_lock); return 0; } /** * iavf_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure **/ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) { struct iavf_adapter *adapter = netdev_priv(netdev); int ret = 0; netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (CLIENT_ENABLED(adapter)) { iavf_notify_client_l2_params(&adapter->vsi); adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; } if (netif_running(netdev)) { iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); ret = iavf_wait_for_reset(adapter); if (ret < 0) netdev_warn(netdev, "MTU change interrupted waiting for reset"); else if (ret) netdev_warn(netdev, "MTU change timed out waiting for reset"); } return ret; } #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_HW_VLAN_CTAG_TX | \ NETIF_F_HW_VLAN_STAG_RX | \ NETIF_F_HW_VLAN_STAG_TX) /** * iavf_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting * Note: expects to be called while under rtnl_lock() **/ static int iavf_set_features(struct net_device *netdev, netdev_features_t features) { struct iavf_adapter *adapter = netdev_priv(netdev); /* trigger update on any VLAN feature change */ if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^ (features & NETIF_VLAN_OFFLOAD_FEATURES)) iavf_set_vlan_offload_features(adapter, netdev->features, features); return 0; } /** * iavf_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff * @dev: This physical port's netdev * @features: Offload features that the stack believes apply **/ static netdev_features_t iavf_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { size_t len; /* No point in doing any of this if neither checksum nor GSO are * being requested for this frame. We can rule out both by just * checking for CHECKSUM_PARTIAL */ if (skb->ip_summed != CHECKSUM_PARTIAL) return features; /* We cannot support GSO if the MSS is going to be less than * 64 bytes. If it is then we need to drop support for GSO. 
*/ if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) features &= ~NETIF_F_GSO_MASK; /* MACLEN can support at most 63 words */ len = skb_network_header(skb) - skb->data; if (len & ~(63 * 2)) goto out_err; /* IPLEN and EIPLEN can support at most 127 dwords */ len = skb_transport_header(skb) - skb_network_header(skb); if (len & ~(127 * 4)) goto out_err; if (skb->encapsulation) { /* L4TUNLEN can support 127 words */ len = skb_inner_network_header(skb) - skb_transport_header(skb); if (len & ~(127 * 2)) goto out_err; /* IPLEN can support at most 127 dwords */ len = skb_inner_transport_header(skb) - skb_inner_network_header(skb); if (len & ~(127 * 4)) goto out_err; } /* No need to validate L4LEN as TCP is the only protocol with a * flexible value and we support all possible values supported * by TCP, which is at most 15 dwords */ return features; out_err: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } /** * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off * @adapter: board private structure * * Depending on whether VIRTHCNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2 * were negotiated determine the VLAN features that can be toggled on and off. **/ static netdev_features_t iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter) { netdev_features_t hw_features = 0; if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags) return hw_features; /* Enable VLAN features if supported */ if (VLAN_ALLOWED(adapter)) { hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); } else if (VLAN_V2_ALLOWED(adapter)) { struct virtchnl_vlan_caps *vlan_v2_caps = &adapter->vlan_v2_caps; struct virtchnl_vlan_supported_caps *stripping_support = &vlan_v2_caps->offloads.stripping_support; struct virtchnl_vlan_supported_caps *insertion_support = &vlan_v2_caps->offloads.insertion_support; if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED && stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) { if (stripping_support->outer & VIRTCHNL_VLAN_ETHERTYPE_8100) hw_features |= NETIF_F_HW_VLAN_CTAG_RX; if (stripping_support->outer & VIRTCHNL_VLAN_ETHERTYPE_88A8) hw_features |= NETIF_F_HW_VLAN_STAG_RX; } else if (stripping_support->inner != VIRTCHNL_VLAN_UNSUPPORTED && stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) { if (stripping_support->inner & VIRTCHNL_VLAN_ETHERTYPE_8100) hw_features |= NETIF_F_HW_VLAN_CTAG_RX; } if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED && insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) { if (insertion_support->outer & VIRTCHNL_VLAN_ETHERTYPE_8100) hw_features |= NETIF_F_HW_VLAN_CTAG_TX; if (insertion_support->outer & VIRTCHNL_VLAN_ETHERTYPE_88A8) hw_features |= NETIF_F_HW_VLAN_STAG_TX; } else if (insertion_support->inner && insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) { if (insertion_support->inner & VIRTCHNL_VLAN_ETHERTYPE_8100) hw_features |= NETIF_F_HW_VLAN_CTAG_TX; } } return hw_features; } /** * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN fetures * @adapter: board private structure * * Depending on whether VIRTHCNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2 * were negotiated determine the VLAN features that are enabled by default. 
**/ static netdev_features_t iavf_get_netdev_vlan_features(struct iavf_adapter *adapter) { netdev_features_t features = 0; if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags) return features; if (VLAN_ALLOWED(adapter)) { features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; } else if (VLAN_V2_ALLOWED(adapter)) { struct virtchnl_vlan_caps *vlan_v2_caps = &adapter->vlan_v2_caps; struct virtchnl_vlan_supported_caps *filtering_support = &vlan_v2_caps->filtering.filtering_support; struct virtchnl_vlan_supported_caps *stripping_support = &vlan_v2_caps->offloads.stripping_support; struct virtchnl_vlan_supported_caps *insertion_support = &vlan_v2_caps->offloads.insertion_support; u32 ethertype_init; /* give priority to outer stripping and don't support both outer * and inner stripping */ ethertype_init = vlan_v2_caps->offloads.ethertype_init; if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { if (stripping_support->outer & VIRTCHNL_VLAN_ETHERTYPE_8100 && ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) features |= NETIF_F_HW_VLAN_CTAG_RX; else if (stripping_support->outer & VIRTCHNL_VLAN_ETHERTYPE_88A8 && ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) features |= NETIF_F_HW_VLAN_STAG_RX; } else if (stripping_support->inner != VIRTCHNL_VLAN_UNSUPPORTED) { if (stripping_support->inner & VIRTCHNL_VLAN_ETHERTYPE_8100 && ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) features |= NETIF_F_HW_VLAN_CTAG_RX; } /* give priority to outer insertion and don't support both outer * and inner insertion */ if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { if (insertion_support->outer & VIRTCHNL_VLAN_ETHERTYPE_8100 && ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) features |= NETIF_F_HW_VLAN_CTAG_TX; else if (insertion_support->outer & VIRTCHNL_VLAN_ETHERTYPE_88A8 && ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) features |= NETIF_F_HW_VLAN_STAG_TX; } else if (insertion_support->inner != VIRTCHNL_VLAN_UNSUPPORTED) { if (insertion_support->inner & VIRTCHNL_VLAN_ETHERTYPE_8100 && ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) features |= NETIF_F_HW_VLAN_CTAG_TX; } /* give priority to outer filtering and don't bother if both * outer and inner filtering are enabled */ ethertype_init = vlan_v2_caps->filtering.ethertype_init; if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { if (filtering_support->outer & VIRTCHNL_VLAN_ETHERTYPE_8100 && ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) features |= NETIF_F_HW_VLAN_CTAG_FILTER; if (filtering_support->outer & VIRTCHNL_VLAN_ETHERTYPE_88A8 && ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) features |= NETIF_F_HW_VLAN_STAG_FILTER; } else if (filtering_support->inner != VIRTCHNL_VLAN_UNSUPPORTED) { if (filtering_support->inner & VIRTCHNL_VLAN_ETHERTYPE_8100 && ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) features |= NETIF_F_HW_VLAN_CTAG_FILTER; if (filtering_support->inner & VIRTCHNL_VLAN_ETHERTYPE_88A8 && ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) features |= NETIF_F_HW_VLAN_STAG_FILTER; } } return features; } #define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \ (!(((requested) & (feature_bit)) && \ !((allowed) & (feature_bit)))) /** * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support * @adapter: board private structure * @requested_features: stack requested NETDEV features **/ static netdev_features_t iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter, netdev_features_t requested_features) { netdev_features_t allowed_features; allowed_features = 
iavf_get_netdev_vlan_hw_features(adapter) | iavf_get_netdev_vlan_features(adapter); if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, allowed_features, NETIF_F_HW_VLAN_CTAG_TX)) requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX; if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, allowed_features, NETIF_F_HW_VLAN_CTAG_RX)) requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX; if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, allowed_features, NETIF_F_HW_VLAN_STAG_TX)) requested_features &= ~NETIF_F_HW_VLAN_STAG_TX; if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, allowed_features, NETIF_F_HW_VLAN_STAG_RX)) requested_features &= ~NETIF_F_HW_VLAN_STAG_RX; if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, allowed_features, NETIF_F_HW_VLAN_CTAG_FILTER)) requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, allowed_features, NETIF_F_HW_VLAN_STAG_FILTER)) requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER; if ((requested_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) && (requested_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) && adapter->vlan_v2_caps.offloads.ethertype_match == VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) { netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n"); requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX); } return requested_features; } /** * iavf_fix_features - fix up the netdev feature bits * @netdev: our net device * @features: desired feature bits * * Returns fixed-up features bits **/ static netdev_features_t iavf_fix_features(struct net_device *netdev, netdev_features_t features) { struct iavf_adapter *adapter = netdev_priv(netdev); return iavf_fix_netdev_vlan_features(adapter, features); } static const struct net_device_ops iavf_netdev_ops = { .ndo_open = iavf_open, .ndo_stop = iavf_close, .ndo_start_xmit = iavf_xmit_frame, .ndo_set_rx_mode = iavf_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = iavf_set_mac, .ndo_change_mtu = iavf_change_mtu, .ndo_tx_timeout = iavf_tx_timeout, .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, .ndo_features_check = iavf_features_check, .ndo_fix_features = iavf_fix_features, .ndo_set_features = iavf_set_features, .ndo_setup_tc = iavf_setup_tc, }; /** * iavf_check_reset_complete - check that VF reset is complete * @hw: pointer to hw struct * * Returns 0 if device is ready to use, or -EBUSY if it's in reset. **/ static int iavf_check_reset_complete(struct iavf_hw *hw) { u32 rstat; int i; for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { rstat = rd32(hw, IAVF_VFGEN_RSTAT) & IAVF_VFGEN_RSTAT_VFR_STATE_MASK; if ((rstat == VIRTCHNL_VFR_VFACTIVE) || (rstat == VIRTCHNL_VFR_COMPLETED)) return 0; usleep_range(10, 20); } return -EBUSY; } /** * iavf_process_config - Process the config information we got from the PF * @adapter: board private structure * * Verify that we have a valid config struct, and set up our netdev features * and our VSI struct. 
**/ int iavf_process_config(struct iavf_adapter *adapter) { struct virtchnl_vf_resource *vfres = adapter->vf_res; netdev_features_t hw_vlan_features, vlan_features; struct net_device *netdev = adapter->netdev; netdev_features_t hw_enc_features; netdev_features_t hw_features; hw_enc_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA | NETIF_F_SOFT_FEATURES | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_SCTP_CRC | NETIF_F_RXHASH | NETIF_F_RXCSUM | 0; /* advertise to stack only if offloads for encapsulated packets is * supported */ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_PARTIAL | 0; if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; netdev->hw_enc_features |= hw_enc_features; } /* record features VLANs can make use of */ netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; /* Write features and hw_features separately to avoid polluting * with, or dropping, features that are set when we registered. */ hw_features = hw_enc_features; /* get HW VLAN features that can be toggled */ hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter); /* Enable cloud filter if ADQ is supported */ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) hw_features |= NETIF_F_HW_TC; if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) hw_features |= NETIF_F_GSO_UDP_L4; netdev->hw_features |= hw_features | hw_vlan_features; vlan_features = iavf_get_netdev_vlan_features(adapter); netdev->features |= hw_features | vlan_features; if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->priv_flags |= IFF_UNICAST_FLT; /* Do not turn on offloads when they are requested to be turned off. * TSO needs minimum 576 bytes to work correctly. */ if (netdev->wanted_features) { if (!(netdev->wanted_features & NETIF_F_TSO) || netdev->mtu < 576) netdev->features &= ~NETIF_F_TSO; if (!(netdev->wanted_features & NETIF_F_TSO6) || netdev->mtu < 576) netdev->features &= ~NETIF_F_TSO6; if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) netdev->features &= ~NETIF_F_TSO_ECN; if (!(netdev->wanted_features & NETIF_F_GRO)) netdev->features &= ~NETIF_F_GRO; if (!(netdev->wanted_features & NETIF_F_GSO)) netdev->features &= ~NETIF_F_GSO; } return 0; } /** * iavf_shutdown - Shutdown the device in preparation for a reboot * @pdev: pci device structure **/ static void iavf_shutdown(struct pci_dev *pdev) { struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); if (netif_running(netdev)) iavf_close(netdev); if (iavf_lock_timeout(&adapter->crit_lock, 5000)) dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__); /* Prevent the watchdog from running. */ iavf_change_state(adapter, __IAVF_REMOVE); adapter->aq_required = 0; mutex_unlock(&adapter->crit_lock); #ifdef CONFIG_PM pci_save_state(pdev); #endif pci_disable_device(pdev); } /** * iavf_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in iavf_pci_tbl * * Returns 0 on success, negative on failure * * iavf_probe initializes an adapter identified by a pci_dev structure. 
* The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. **/ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct iavf_adapter *adapter = NULL; struct iavf_hw *hw = NULL; int err; err = pci_enable_device(pdev); if (err) return err; err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); goto err_dma; } err = pci_request_regions(pdev, iavf_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); goto err_pci_reg; } pci_set_master(pdev); netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter), IAVF_MAX_REQ_QUEUES); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; } SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; hw = &adapter->hw; hw->back = adapter; adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, iavf_driver_name); if (!adapter->wq) { err = -ENOMEM; goto err_alloc_wq; } adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; iavf_change_state(adapter, __IAVF_STARTUP); /* Call save state here because it relies on the adapter struct. */ pci_save_state(pdev); hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!hw->hw_addr) { err = -EIO; goto err_ioremap; } hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); hw->bus.bus_id = pdev->bus->number; /* set up the locks for the AQ, do this only once in probe * and destroy them only once in remove */ mutex_init(&adapter->crit_lock); mutex_init(&adapter->client_lock); mutex_init(&hw->aq.asq_mutex); mutex_init(&hw->aq.arq_mutex); spin_lock_init(&adapter->mac_vlan_list_lock); spin_lock_init(&adapter->cloud_filter_list_lock); spin_lock_init(&adapter->fdir_fltr_lock); spin_lock_init(&adapter->adv_rss_lock); INIT_LIST_HEAD(&adapter->mac_filter_list); INIT_LIST_HEAD(&adapter->vlan_filter_list); INIT_LIST_HEAD(&adapter->cloud_filter_list); INIT_LIST_HEAD(&adapter->fdir_list_head); INIT_LIST_HEAD(&adapter->adv_rss_list_head); INIT_WORK(&adapter->reset_task, iavf_reset_task); INIT_WORK(&adapter->adminq_task, iavf_adminq_task); INIT_WORK(&adapter->finish_config, iavf_finish_config); INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(5 * (pdev->devfn & 0x07))); /* Setup the wait queue for indicating transition to down status */ init_waitqueue_head(&adapter->down_waitqueue); /* Setup the wait queue for indicating transition to running state */ init_waitqueue_head(&adapter->reset_waitqueue); /* Setup the wait queue for indicating virtchannel events */ init_waitqueue_head(&adapter->vc_waitqueue); return 0; err_ioremap: destroy_workqueue(adapter->wq); err_alloc_wq: free_netdev(netdev); err_alloc_etherdev: pci_release_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); return err; } /** * iavf_suspend - Power management suspend routine * @dev_d: device info pointer * * Called when the system (VM) is entering sleep/suspend. 
**/ static int __maybe_unused iavf_suspend(struct device *dev_d) { struct net_device *netdev = dev_get_drvdata(dev_d); struct iavf_adapter *adapter = netdev_priv(netdev); netif_device_detach(netdev); while (!mutex_trylock(&adapter->crit_lock)) usleep_range(500, 1000); if (netif_running(netdev)) { rtnl_lock(); iavf_down(adapter); rtnl_unlock(); } iavf_free_misc_irq(adapter); iavf_reset_interrupt_capability(adapter); mutex_unlock(&adapter->crit_lock); return 0; } /** * iavf_resume - Power management resume routine * @dev_d: device info pointer * * Called when the system (VM) is resumed from sleep/suspend. **/ static int __maybe_unused iavf_resume(struct device *dev_d) { struct pci_dev *pdev = to_pci_dev(dev_d); struct iavf_adapter *adapter; u32 err; adapter = iavf_pdev_to_adapter(pdev); pci_set_master(pdev); rtnl_lock(); err = iavf_set_interrupt_capability(adapter); if (err) { rtnl_unlock(); dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n"); return err; } err = iavf_request_misc_irq(adapter); rtnl_unlock(); if (err) { dev_err(&pdev->dev, "Cannot get interrupt vector.\n"); return err; } queue_work(adapter->wq, &adapter->reset_task); netif_device_attach(adapter->netdev); return err; } /** * iavf_remove - Device Removal Routine * @pdev: PCI device information struct * * iavf_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. The could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. **/ static void iavf_remove(struct pci_dev *pdev) { struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); struct iavf_fdir_fltr *fdir, *fdirtmp; struct iavf_vlan_filter *vlf, *vlftmp; struct iavf_cloud_filter *cf, *cftmp; struct iavf_adv_rss *rss, *rsstmp; struct iavf_mac_filter *f, *ftmp; struct net_device *netdev; struct iavf_hw *hw; int err; netdev = adapter->netdev; hw = &adapter->hw; if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) return; /* Wait until port initialization is complete. * There are flows where register/unregister netdev may race. */ while (1) { mutex_lock(&adapter->crit_lock); if (adapter->state == __IAVF_RUNNING || adapter->state == __IAVF_DOWN || adapter->state == __IAVF_INIT_FAILED) { mutex_unlock(&adapter->crit_lock); break; } /* Simply return if we already went through iavf_shutdown */ if (adapter->state == __IAVF_REMOVE) { mutex_unlock(&adapter->crit_lock); return; } mutex_unlock(&adapter->crit_lock); usleep_range(500, 1000); } cancel_delayed_work_sync(&adapter->watchdog_task); cancel_work_sync(&adapter->finish_config); rtnl_lock(); if (adapter->netdev_registered) { unregister_netdevice(netdev); adapter->netdev_registered = false; } rtnl_unlock(); if (CLIENT_ALLOWED(adapter)) { err = iavf_lan_del_device(adapter); if (err) dev_warn(&pdev->dev, "Failed to delete client device: %d\n", err); } mutex_lock(&adapter->crit_lock); dev_info(&adapter->pdev->dev, "Removing device\n"); iavf_change_state(adapter, __IAVF_REMOVE); iavf_request_reset(adapter); msleep(50); /* If the FW isn't responding, kick it once, but only once. 
*/ if (!iavf_asq_done(hw)) { iavf_request_reset(adapter); msleep(50); } iavf_misc_irq_disable(adapter); /* Shut down all the garbage mashers on the detention level */ cancel_work_sync(&adapter->reset_task); cancel_delayed_work_sync(&adapter->watchdog_task); cancel_work_sync(&adapter->adminq_task); cancel_delayed_work_sync(&adapter->client_task); adapter->aq_required = 0; adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); iavf_free_misc_irq(adapter); iavf_reset_interrupt_capability(adapter); iavf_free_q_vectors(adapter); iavf_free_rss(adapter); if (hw->aq.asq.count) iavf_shutdown_adminq(hw); /* destroy the locks only once, here */ mutex_destroy(&hw->aq.arq_mutex); mutex_destroy(&hw->aq.asq_mutex); mutex_destroy(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); mutex_destroy(&adapter->crit_lock); iounmap(hw->hw_addr); pci_release_regions(pdev); iavf_free_queues(adapter); kfree(adapter->vf_res); spin_lock_bh(&adapter->mac_vlan_list_lock); /* If we got removed before an up/down sequence, we've got a filter * hanging out there that we need to get rid of. */ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { list_del(&f->list); kfree(f); } list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, list) { list_del(&vlf->list); kfree(vlf); } spin_unlock_bh(&adapter->mac_vlan_list_lock); spin_lock_bh(&adapter->cloud_filter_list_lock); list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { list_del(&cf->list); kfree(cf); } spin_unlock_bh(&adapter->cloud_filter_list_lock); spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) { list_del(&fdir->list); kfree(fdir); } spin_unlock_bh(&adapter->fdir_fltr_lock); spin_lock_bh(&adapter->adv_rss_lock); list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head, list) { list_del(&rss->list); kfree(rss); } spin_unlock_bh(&adapter->adv_rss_lock); destroy_workqueue(adapter->wq); free_netdev(netdev); pci_disable_device(pdev); } static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume); static struct pci_driver iavf_driver = { .name = iavf_driver_name, .id_table = iavf_pci_tbl, .probe = iavf_probe, .remove = iavf_remove, .driver.pm = &iavf_pm_ops, .shutdown = iavf_shutdown, }; /** * iavf_init_module - Driver Registration Routine * * iavf_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. **/ static int __init iavf_init_module(void) { pr_info("iavf: %s\n", iavf_driver_string); pr_info("%s\n", iavf_copyright); return pci_register_driver(&iavf_driver); } module_init(iavf_init_module); /** * iavf_exit_module - Driver Exit Cleanup Routine * * iavf_exit_module is called just before the driver is removed * from memory. **/ static void __exit iavf_exit_module(void) { pci_unregister_driver(&iavf_driver); } module_exit(iavf_exit_module); /* iavf_main.c */
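/*
 * Illustrative user-space sketch, not part of the driver: a simplified
 * re-statement of the per-TC rate checks performed by
 * iavf_validate_ch_config() in the listing above (convert the requested
 * byte rate to Mbps, enforce a minimum, and require the rate to be a
 * multiple of the quanta).  The divisor and quanta values below are
 * assumptions made for this example only; the driver takes them from its
 * own IAVF_MBPS_DIVISOR and IAVF_MBPS_QUANTA defines.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_MBPS_DIVISOR	125000ULL	/* assumed: bytes/s per Mbit/s */
#define EX_MBPS_QUANTA	50ULL		/* assumed minimum granularity, Mbps */

/* Return 0 if the requested byte rate is acceptable for a TC, -1 otherwise. */
static int ex_validate_tc_rate(uint64_t max_rate_bytes)
{
	uint64_t tx_rate_mbps = max_rate_bytes / EX_MBPS_DIVISOR;

	if (max_rate_bytes && tx_rate_mbps < EX_MBPS_QUANTA)
		return -1;	/* below the assumed minimum rate */
	if (tx_rate_mbps % EX_MBPS_QUANTA)
		return -1;	/* not a multiple of the assumed quanta */
	return 0;
}

int main(void)
{
	/* 100 Mbit/s expressed in bytes per second: accepted. */
	printf("100 Mbps -> %d\n", ex_validate_tc_rate(100ULL * EX_MBPS_DIVISOR));
	/* 30 Mbit/s: rejected, below the assumed 50 Mbps quanta. */
	printf("30 Mbps  -> %d\n", ex_validate_tc_rate(30ULL * EX_MBPS_DIVISOR));
	return 0;
}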
linux-master
drivers/net/ethernet/intel/iavf/iavf_main.c
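/*
 * Illustrative stand-alone sketch, not driver code: the bit test behind the
 * IAVF_NETDEV_VLAN_FEATURE_ALLOWED() macro used by
 * iavf_fix_netdev_vlan_features() in the iavf_main.c listing above.  A
 * requested feature bit survives only if it is also present in the allowed
 * mask.  The EX_* bits are made-up stand-ins for the NETIF_F_HW_VLAN_*
 * flags, chosen only so the example compiles on its own.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_CTAG_TX	(1u << 0)	/* stand-in for NETIF_F_HW_VLAN_CTAG_TX */
#define EX_STAG_TX	(1u << 1)	/* stand-in for NETIF_F_HW_VLAN_STAG_TX */

static bool ex_feature_allowed(uint32_t requested, uint32_t allowed, uint32_t bit)
{
	/* Disallowed only when the bit is requested but not advertised. */
	return !((requested & bit) && !(allowed & bit));
}

static uint32_t ex_fix_features(uint32_t requested, uint32_t allowed)
{
	if (!ex_feature_allowed(requested, allowed, EX_CTAG_TX))
		requested &= ~EX_CTAG_TX;
	if (!ex_feature_allowed(requested, allowed, EX_STAG_TX))
		requested &= ~EX_STAG_TX;
	return requested;
}

int main(void)
{
	/* STAG TX is requested but not advertised, so it is cleared. */
	printf("fixed = 0x%x\n",
	       ex_fix_features(EX_CTAG_TX | EX_STAG_TX, EX_CTAG_TX));
	return 0;
}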
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "iavf_type.h" #include "iavf_adminq.h" #include "iavf_prototype.h" #include <linux/avf/virtchnl.h> /** * iavf_set_mac_type - Sets MAC type * @hw: pointer to the HW structure * * This function sets the mac type of the adapter based on the * vendor ID and device ID stored in the hw structure. **/ enum iavf_status iavf_set_mac_type(struct iavf_hw *hw) { enum iavf_status status = 0; if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { switch (hw->device_id) { case IAVF_DEV_ID_X722_VF: hw->mac.type = IAVF_MAC_X722_VF; break; case IAVF_DEV_ID_VF: case IAVF_DEV_ID_VF_HV: case IAVF_DEV_ID_ADAPTIVE_VF: hw->mac.type = IAVF_MAC_VF; break; default: hw->mac.type = IAVF_MAC_GENERIC; break; } } else { status = IAVF_ERR_DEVICE_NOT_SUPPORTED; } return status; } /** * iavf_aq_str - convert AQ err code to a string * @hw: pointer to the HW structure * @aq_err: the AQ error code to convert **/ const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err) { switch (aq_err) { case IAVF_AQ_RC_OK: return "OK"; case IAVF_AQ_RC_EPERM: return "IAVF_AQ_RC_EPERM"; case IAVF_AQ_RC_ENOENT: return "IAVF_AQ_RC_ENOENT"; case IAVF_AQ_RC_ESRCH: return "IAVF_AQ_RC_ESRCH"; case IAVF_AQ_RC_EINTR: return "IAVF_AQ_RC_EINTR"; case IAVF_AQ_RC_EIO: return "IAVF_AQ_RC_EIO"; case IAVF_AQ_RC_ENXIO: return "IAVF_AQ_RC_ENXIO"; case IAVF_AQ_RC_E2BIG: return "IAVF_AQ_RC_E2BIG"; case IAVF_AQ_RC_EAGAIN: return "IAVF_AQ_RC_EAGAIN"; case IAVF_AQ_RC_ENOMEM: return "IAVF_AQ_RC_ENOMEM"; case IAVF_AQ_RC_EACCES: return "IAVF_AQ_RC_EACCES"; case IAVF_AQ_RC_EFAULT: return "IAVF_AQ_RC_EFAULT"; case IAVF_AQ_RC_EBUSY: return "IAVF_AQ_RC_EBUSY"; case IAVF_AQ_RC_EEXIST: return "IAVF_AQ_RC_EEXIST"; case IAVF_AQ_RC_EINVAL: return "IAVF_AQ_RC_EINVAL"; case IAVF_AQ_RC_ENOTTY: return "IAVF_AQ_RC_ENOTTY"; case IAVF_AQ_RC_ENOSPC: return "IAVF_AQ_RC_ENOSPC"; case IAVF_AQ_RC_ENOSYS: return "IAVF_AQ_RC_ENOSYS"; case IAVF_AQ_RC_ERANGE: return "IAVF_AQ_RC_ERANGE"; case IAVF_AQ_RC_EFLUSHED: return "IAVF_AQ_RC_EFLUSHED"; case IAVF_AQ_RC_BAD_ADDR: return "IAVF_AQ_RC_BAD_ADDR"; case IAVF_AQ_RC_EMODE: return "IAVF_AQ_RC_EMODE"; case IAVF_AQ_RC_EFBIG: return "IAVF_AQ_RC_EFBIG"; } snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); return hw->err_str; } /** * iavf_stat_str - convert status err code to a string * @hw: pointer to the HW structure * @stat_err: the status error code to convert **/ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err) { switch (stat_err) { case 0: return "OK"; case IAVF_ERR_NVM: return "IAVF_ERR_NVM"; case IAVF_ERR_NVM_CHECKSUM: return "IAVF_ERR_NVM_CHECKSUM"; case IAVF_ERR_PHY: return "IAVF_ERR_PHY"; case IAVF_ERR_CONFIG: return "IAVF_ERR_CONFIG"; case IAVF_ERR_PARAM: return "IAVF_ERR_PARAM"; case IAVF_ERR_MAC_TYPE: return "IAVF_ERR_MAC_TYPE"; case IAVF_ERR_UNKNOWN_PHY: return "IAVF_ERR_UNKNOWN_PHY"; case IAVF_ERR_LINK_SETUP: return "IAVF_ERR_LINK_SETUP"; case IAVF_ERR_ADAPTER_STOPPED: return "IAVF_ERR_ADAPTER_STOPPED"; case IAVF_ERR_INVALID_MAC_ADDR: return "IAVF_ERR_INVALID_MAC_ADDR"; case IAVF_ERR_DEVICE_NOT_SUPPORTED: return "IAVF_ERR_DEVICE_NOT_SUPPORTED"; case IAVF_ERR_PRIMARY_REQUESTS_PENDING: return "IAVF_ERR_PRIMARY_REQUESTS_PENDING"; case IAVF_ERR_INVALID_LINK_SETTINGS: return "IAVF_ERR_INVALID_LINK_SETTINGS"; case IAVF_ERR_AUTONEG_NOT_COMPLETE: return "IAVF_ERR_AUTONEG_NOT_COMPLETE"; case IAVF_ERR_RESET_FAILED: return "IAVF_ERR_RESET_FAILED"; case IAVF_ERR_SWFW_SYNC: return "IAVF_ERR_SWFW_SYNC"; case 
IAVF_ERR_NO_AVAILABLE_VSI: return "IAVF_ERR_NO_AVAILABLE_VSI"; case IAVF_ERR_NO_MEMORY: return "IAVF_ERR_NO_MEMORY"; case IAVF_ERR_BAD_PTR: return "IAVF_ERR_BAD_PTR"; case IAVF_ERR_RING_FULL: return "IAVF_ERR_RING_FULL"; case IAVF_ERR_INVALID_PD_ID: return "IAVF_ERR_INVALID_PD_ID"; case IAVF_ERR_INVALID_QP_ID: return "IAVF_ERR_INVALID_QP_ID"; case IAVF_ERR_INVALID_CQ_ID: return "IAVF_ERR_INVALID_CQ_ID"; case IAVF_ERR_INVALID_CEQ_ID: return "IAVF_ERR_INVALID_CEQ_ID"; case IAVF_ERR_INVALID_AEQ_ID: return "IAVF_ERR_INVALID_AEQ_ID"; case IAVF_ERR_INVALID_SIZE: return "IAVF_ERR_INVALID_SIZE"; case IAVF_ERR_INVALID_ARP_INDEX: return "IAVF_ERR_INVALID_ARP_INDEX"; case IAVF_ERR_INVALID_FPM_FUNC_ID: return "IAVF_ERR_INVALID_FPM_FUNC_ID"; case IAVF_ERR_QP_INVALID_MSG_SIZE: return "IAVF_ERR_QP_INVALID_MSG_SIZE"; case IAVF_ERR_QP_TOOMANY_WRS_POSTED: return "IAVF_ERR_QP_TOOMANY_WRS_POSTED"; case IAVF_ERR_INVALID_FRAG_COUNT: return "IAVF_ERR_INVALID_FRAG_COUNT"; case IAVF_ERR_QUEUE_EMPTY: return "IAVF_ERR_QUEUE_EMPTY"; case IAVF_ERR_INVALID_ALIGNMENT: return "IAVF_ERR_INVALID_ALIGNMENT"; case IAVF_ERR_FLUSHED_QUEUE: return "IAVF_ERR_FLUSHED_QUEUE"; case IAVF_ERR_INVALID_PUSH_PAGE_INDEX: return "IAVF_ERR_INVALID_PUSH_PAGE_INDEX"; case IAVF_ERR_INVALID_IMM_DATA_SIZE: return "IAVF_ERR_INVALID_IMM_DATA_SIZE"; case IAVF_ERR_TIMEOUT: return "IAVF_ERR_TIMEOUT"; case IAVF_ERR_OPCODE_MISMATCH: return "IAVF_ERR_OPCODE_MISMATCH"; case IAVF_ERR_CQP_COMPL_ERROR: return "IAVF_ERR_CQP_COMPL_ERROR"; case IAVF_ERR_INVALID_VF_ID: return "IAVF_ERR_INVALID_VF_ID"; case IAVF_ERR_INVALID_HMCFN_ID: return "IAVF_ERR_INVALID_HMCFN_ID"; case IAVF_ERR_BACKING_PAGE_ERROR: return "IAVF_ERR_BACKING_PAGE_ERROR"; case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE: return "IAVF_ERR_NO_PBLCHUNKS_AVAILABLE"; case IAVF_ERR_INVALID_PBLE_INDEX: return "IAVF_ERR_INVALID_PBLE_INDEX"; case IAVF_ERR_INVALID_SD_INDEX: return "IAVF_ERR_INVALID_SD_INDEX"; case IAVF_ERR_INVALID_PAGE_DESC_INDEX: return "IAVF_ERR_INVALID_PAGE_DESC_INDEX"; case IAVF_ERR_INVALID_SD_TYPE: return "IAVF_ERR_INVALID_SD_TYPE"; case IAVF_ERR_MEMCPY_FAILED: return "IAVF_ERR_MEMCPY_FAILED"; case IAVF_ERR_INVALID_HMC_OBJ_INDEX: return "IAVF_ERR_INVALID_HMC_OBJ_INDEX"; case IAVF_ERR_INVALID_HMC_OBJ_COUNT: return "IAVF_ERR_INVALID_HMC_OBJ_COUNT"; case IAVF_ERR_INVALID_SRQ_ARM_LIMIT: return "IAVF_ERR_INVALID_SRQ_ARM_LIMIT"; case IAVF_ERR_SRQ_ENABLED: return "IAVF_ERR_SRQ_ENABLED"; case IAVF_ERR_ADMIN_QUEUE_ERROR: return "IAVF_ERR_ADMIN_QUEUE_ERROR"; case IAVF_ERR_ADMIN_QUEUE_TIMEOUT: return "IAVF_ERR_ADMIN_QUEUE_TIMEOUT"; case IAVF_ERR_BUF_TOO_SHORT: return "IAVF_ERR_BUF_TOO_SHORT"; case IAVF_ERR_ADMIN_QUEUE_FULL: return "IAVF_ERR_ADMIN_QUEUE_FULL"; case IAVF_ERR_ADMIN_QUEUE_NO_WORK: return "IAVF_ERR_ADMIN_QUEUE_NO_WORK"; case IAVF_ERR_BAD_RDMA_CQE: return "IAVF_ERR_BAD_RDMA_CQE"; case IAVF_ERR_NVM_BLANK_MODE: return "IAVF_ERR_NVM_BLANK_MODE"; case IAVF_ERR_NOT_IMPLEMENTED: return "IAVF_ERR_NOT_IMPLEMENTED"; case IAVF_ERR_PE_DOORBELL_NOT_ENABLED: return "IAVF_ERR_PE_DOORBELL_NOT_ENABLED"; case IAVF_ERR_DIAG_TEST_FAILED: return "IAVF_ERR_DIAG_TEST_FAILED"; case IAVF_ERR_NOT_READY: return "IAVF_ERR_NOT_READY"; case IAVF_NOT_SUPPORTED: return "IAVF_NOT_SUPPORTED"; case IAVF_ERR_FIRMWARE_API_VERSION: return "IAVF_ERR_FIRMWARE_API_VERSION"; case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR: return "IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; } snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); return hw->err_str; } /** * iavf_debug_aq * @hw: debug mask related to admin queue * @mask: debug mask * @desc: 
pointer to admin queue descriptor * @buffer: pointer to command buffer * @buf_len: max length of buffer * * Dumps debug log about adminq command with descriptor contents. **/ void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc, void *buffer, u16 buf_len) { struct iavf_aq_desc *aq_desc = (struct iavf_aq_desc *)desc; u8 *buf = (u8 *)buffer; if ((!(mask & hw->debug_mask)) || !desc) return; iavf_debug(hw, mask, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", le16_to_cpu(aq_desc->opcode), le16_to_cpu(aq_desc->flags), le16_to_cpu(aq_desc->datalen), le16_to_cpu(aq_desc->retval)); iavf_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", le32_to_cpu(aq_desc->cookie_high), le32_to_cpu(aq_desc->cookie_low)); iavf_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", le32_to_cpu(aq_desc->params.internal.param0), le32_to_cpu(aq_desc->params.internal.param1)); iavf_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", le32_to_cpu(aq_desc->params.external.addr_high), le32_to_cpu(aq_desc->params.external.addr_low)); if (buffer && aq_desc->datalen) { u16 len = le16_to_cpu(aq_desc->datalen); iavf_debug(hw, mask, "AQ CMD Buffer:\n"); if (buf_len < len) len = buf_len; /* write the full 16-byte chunks */ if (hw->debug_mask & mask) { char prefix[27]; snprintf(prefix, sizeof(prefix), "iavf %02x:%02x.%x: \t0x", hw->bus.bus_id, hw->bus.device, hw->bus.func); print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, buf, len, false); } } } /** * iavf_check_asq_alive * @hw: pointer to the hw struct * * Returns true if Queue is enabled else false. **/ bool iavf_check_asq_alive(struct iavf_hw *hw) { if (hw->aq.asq.len) return !!(rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQENABLE_MASK); else return false; } /** * iavf_aq_queue_shutdown * @hw: pointer to the hw struct * @unloading: is the driver unloading itself * * Tell the Firmware that we're shutting down the AdminQ and whether * or not the driver is unloading as well. 
**/ enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading) { struct iavf_aq_desc desc; struct iavf_aqc_queue_shutdown *cmd = (struct iavf_aqc_queue_shutdown *)&desc.params.raw; enum iavf_status status; iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown); if (unloading) cmd->driver_unloading = cpu_to_le32(IAVF_AQ_DRIVER_UNLOADING); status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL); return status; } /** * iavf_aq_get_set_rss_lut * @hw: pointer to the hardware structure * @vsi_id: vsi fw index * @pf_lut: for PF table set true, for VSI table set false * @lut: pointer to the lut buffer provided by the caller * @lut_size: size of the lut buffer * @set: set true to set the table, false to get the table * * Internal function to get or set RSS look up table **/ static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw, u16 vsi_id, bool pf_lut, u8 *lut, u16 lut_size, bool set) { enum iavf_status status; struct iavf_aq_desc desc; struct iavf_aqc_get_set_rss_lut *cmd_resp = (struct iavf_aqc_get_set_rss_lut *)&desc.params.raw; if (set) iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_set_rss_lut); else iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_get_rss_lut); /* Indirect command */ desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF); desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD); cmd_resp->vsi_id = cpu_to_le16((u16)((vsi_id << IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK)); cmd_resp->vsi_id |= cpu_to_le16((u16)IAVF_AQC_SET_RSS_LUT_VSI_VALID); if (pf_lut) cmd_resp->flags |= cpu_to_le16((u16) ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF << IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); else cmd_resp->flags |= cpu_to_le16((u16) ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL); return status; } /** * iavf_aq_set_rss_lut * @hw: pointer to the hardware structure * @vsi_id: vsi fw index * @pf_lut: for PF table set true, for VSI table set false * @lut: pointer to the lut buffer provided by the caller * @lut_size: size of the lut buffer * * set the RSS lookup table, PF or VSI type **/ enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id, bool pf_lut, u8 *lut, u16 lut_size) { return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); } /** * iavf_aq_get_set_rss_key * @hw: pointer to the hw struct * @vsi_id: vsi fw index * @key: pointer to key info struct * @set: set true to set the key, false to get the key * * get the RSS key per VSI **/ static enum iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id, struct iavf_aqc_get_set_rss_key_data *key, bool set) { enum iavf_status status; struct iavf_aq_desc desc; struct iavf_aqc_get_set_rss_key *cmd_resp = (struct iavf_aqc_get_set_rss_key *)&desc.params.raw; u16 key_size = sizeof(struct iavf_aqc_get_set_rss_key_data); if (set) iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_set_rss_key); else iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_get_rss_key); /* Indirect command */ desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF); desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD); cmd_resp->vsi_id = cpu_to_le16((u16)((vsi_id << IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK)); cmd_resp->vsi_id |= cpu_to_le16((u16)IAVF_AQC_SET_RSS_KEY_VSI_VALID); status = iavf_asq_send_command(hw, &desc, key, key_size, NULL); return status; } /** * 
iavf_aq_set_rss_key * @hw: pointer to the hw struct * @vsi_id: vsi fw index * @key: pointer to key info struct * * set the RSS key per VSI **/ enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id, struct iavf_aqc_get_set_rss_key_data *key) { return iavf_aq_get_set_rss_key(hw, vsi_id, key, true); } /* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the * hardware to a bit-field that can be used by SW to more easily determine the * packet type. * * Macros are used to shorten the table lines and make this table human * readable. * * We store the PTYPE in the top byte of the bit field - this is just so that * we can check that the table doesn't have a row missing, as the index into * the table should be the PTYPE. * * Typical work flow: * * IF NOT iavf_ptype_lookup[ptype].known * THEN * Packet is unknown * ELSE IF iavf_ptype_lookup[ptype].outer_ip == IAVF_RX_PTYPE_OUTER_IP * Use the rest of the fields to look at the tunnels, inner protocols, etc * ELSE * Use the enum iavf_rx_l2_ptype to decode the packet type * ENDIF */ /* macro to make the table lines short, use explicit indexing with [PTYPE] */ #define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ [PTYPE] = { \ 1, \ IAVF_RX_PTYPE_OUTER_##OUTER_IP, \ IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \ IAVF_RX_PTYPE_##OUTER_FRAG, \ IAVF_RX_PTYPE_TUNNEL_##T, \ IAVF_RX_PTYPE_TUNNEL_END_##TE, \ IAVF_RX_PTYPE_##TEF, \ IAVF_RX_PTYPE_INNER_PROT_##I, \ IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL } #define IAVF_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* shorter macros makes the table fit but are terse */ #define IAVF_RX_PTYPE_NOF IAVF_RX_PTYPE_NOT_FRAG #define IAVF_RX_PTYPE_FRG IAVF_RX_PTYPE_FRAG #define IAVF_RX_PTYPE_INNER_PROT_TS IAVF_RX_PTYPE_INNER_PROT_TIMESYNC /* Lookup table mapping the 8-bit HW PTYPE to the bit field for decoding */ struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = { /* L2 Packet types */ IAVF_PTT_UNUSED_ENTRY(0), IAVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), IAVF_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), IAVF_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), IAVF_PTT_UNUSED_ENTRY(4), IAVF_PTT_UNUSED_ENTRY(5), IAVF_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), IAVF_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), IAVF_PTT_UNUSED_ENTRY(8), IAVF_PTT_UNUSED_ENTRY(9), IAVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), IAVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), IAVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), /* Non Tunneled IPv4 */ IAVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(25), IAVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), IAVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), IAVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), /* IPv4 --> IPv4 */ IAVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), IAVF_PTT(30, IP, 
IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), IAVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(32), IAVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), IAVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), IAVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), /* IPv4 --> IPv6 */ IAVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), IAVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), IAVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(39), IAVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), IAVF_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), IAVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT */ IAVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), /* IPv4 --> GRE/NAT --> IPv4 */ IAVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), IAVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), IAVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(47), IAVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), IAVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), IAVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> IPv6 */ IAVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), IAVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), IAVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(54), IAVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), IAVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), IAVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> MAC */ IAVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ IAVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), IAVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), IAVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(62), IAVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), IAVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), IAVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ IAVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), IAVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), IAVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(69), IAVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), IAVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), IAVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> MAC/VLAN */ IAVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ IAVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), IAVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), IAVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(77), IAVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), IAVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), IAVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ IAVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), IAVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), IAVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, 
PAY4), IAVF_PTT_UNUSED_ENTRY(84), IAVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), IAVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), IAVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), /* Non Tunneled IPv6 */ IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(91), IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), IAVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), /* IPv6 --> IPv4 */ IAVF_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), IAVF_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), IAVF_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(98), IAVF_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), IAVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), IAVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), /* IPv6 --> IPv6 */ IAVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), IAVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), IAVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(105), IAVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), IAVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), IAVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT */ IAVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> IPv4 */ IAVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), IAVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), IAVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(113), IAVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), IAVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), IAVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> IPv6 */ IAVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), IAVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), IAVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(120), IAVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), IAVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), IAVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC */ IAVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ IAVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), IAVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), IAVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(128), IAVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), IAVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), IAVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ IAVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), IAVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), IAVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(135), IAVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), IAVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), IAVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC/VLAN */ IAVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, 
NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ IAVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), IAVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), IAVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(143), IAVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), IAVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), IAVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ IAVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), IAVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), IAVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(150), IAVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), IAVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), /* unused entries */ [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; /** * iavf_aq_send_msg_to_pf * @hw: pointer to the hardware structure * @v_opcode: opcodes for VF-PF communication * @v_retval: return error code * @msg: pointer to the msg buffer * @msglen: msg length * @cmd_details: pointer to command details * * Send message to PF driver using admin queue. By default, this message * is sent asynchronously, i.e. iavf_asq_send_command() does not wait for * completion before returning. **/ enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw, enum virtchnl_ops v_opcode, enum iavf_status v_retval, u8 *msg, u16 msglen, struct iavf_asq_cmd_details *cmd_details) { struct iavf_asq_cmd_details details; struct iavf_aq_desc desc; enum iavf_status status; iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_send_msg_to_pf); desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_SI); desc.cookie_high = cpu_to_le32(v_opcode); desc.cookie_low = cpu_to_le32(v_retval); if (msglen) { desc.flags |= cpu_to_le16((u16)(IAVF_AQ_FLAG_BUF | IAVF_AQ_FLAG_RD)); if (msglen > IAVF_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_LB); desc.datalen = cpu_to_le16(msglen); } if (!cmd_details) { memset(&details, 0, sizeof(details)); details.async = true; cmd_details = &details; } status = iavf_asq_send_command(hw, &desc, msg, msglen, cmd_details); return status; } /** * iavf_vf_parse_hw_config * @hw: pointer to the hardware structure * @msg: pointer to the virtual channel VF resource structure * * Given a VF resource message from the PF, populate the hw struct * with appropriate information. **/ void iavf_vf_parse_hw_config(struct iavf_hw *hw, struct virtchnl_vf_resource *msg) { struct virtchnl_vsi_resource *vsi_res; int i; vsi_res = &msg->vsi_res[0]; hw->dev_caps.num_vsis = msg->num_vsis; hw->dev_caps.num_rx_qp = msg->num_queue_pairs; hw->dev_caps.num_tx_qp = msg->num_queue_pairs; hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; hw->dev_caps.dcb = msg->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_L2; hw->dev_caps.fcoe = 0; for (i = 0; i < msg->num_vsis; i++) { if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) { ether_addr_copy(hw->mac.perm_addr, vsi_res->default_mac_addr); ether_addr_copy(hw->mac.addr, vsi_res->default_mac_addr); } vsi_res++; } }
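/* Editorial example, not part of the driver: a minimal sketch of the
 * "typical work flow" documented above the iavf_ptype_lookup[] table.
 * It relies only on names visible in this file (the table itself, the
 * known/outer_ip/outer_ip_ver/inner_prot fields it initializes, and the
 * IAVF_RX_PTYPE_* values produced by the IAVF_PTT() macro); the helper
 * name is hypothetical.
 */
static inline bool iavf_example_ptype_is_ipv4_tcp(u8 ptype)
{
	struct iavf_rx_ptype_decoded decoded = iavf_ptype_lookup[ptype];

	/* IAVF_PTT_UNUSED_ENTRY() rows decode with known == 0 */
	if (!decoded.known)
		return false;

	/* L2-only rows (ptypes 1..21) never report an outer IP header */
	if (decoded.outer_ip != IAVF_RX_PTYPE_OUTER_IP)
		return false;

	/* e.g. ptype 26 (non-tunneled IPv4/TCP) satisfies both checks */
	return decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4 &&
	       decoded.inner_prot == IAVF_RX_PTYPE_INNER_PROT_TCP;
}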
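/* Editorial example, not part of the driver: a minimal sketch of calling
 * iavf_aq_send_msg_to_pf() above. Passing a NULL cmd_details makes the
 * send asynchronous, exactly as the kernel-doc describes. The wrapper
 * name is hypothetical; VIRTCHNL_OP_GET_VF_RESOURCES, IAVF_SUCCESS and
 * the u32 capability bitmap are assumed from virtchnl.h/iavf_status.h,
 * and the payload the PF expects can differ with the negotiated
 * virtchnl version.
 */
static enum iavf_status iavf_example_request_vf_resources(struct iavf_hw *hw)
{
	u32 caps = VIRTCHNL_VF_OFFLOAD_L2;

	/* Fire-and-forget request; the PF's reply arrives later on the
	 * admin receive queue and would be handed to
	 * iavf_vf_parse_hw_config() above.
	 */
	return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
				      IAVF_SUCCESS, (u8 *)&caps,
				      sizeof(caps), NULL);
}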
linux-master
drivers/net/ethernet/intel/iavf/iavf_common.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include <linux/prefetch.h> #include "iavf.h" #include "iavf_trace.h" #include "iavf_prototype.h" static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, u32 td_tag) { return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA | ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) | ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) | ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) | ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT)); } #define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS) /** * iavf_unmap_and_free_tx_resource - Release a Tx buffer * @ring: the ring that owns the buffer * @tx_buffer: the buffer to free **/ static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring, struct iavf_tx_buffer *tx_buffer) { if (tx_buffer->skb) { if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB) kfree(tx_buffer->raw_buf); else dev_kfree_skb_any(tx_buffer->skb); if (dma_unmap_len(tx_buffer, len)) dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); } else if (dma_unmap_len(tx_buffer, len)) { dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); } tx_buffer->next_to_watch = NULL; tx_buffer->skb = NULL; dma_unmap_len_set(tx_buffer, len, 0); /* tx_buffer must be completely set up in the transmit path */ } /** * iavf_clean_tx_ring - Free any empty Tx buffers * @tx_ring: ring to be cleaned **/ static void iavf_clean_tx_ring(struct iavf_ring *tx_ring) { unsigned long bi_size; u16 i; /* ring already cleared, nothing to do */ if (!tx_ring->tx_bi) return; /* Free all the Tx ring sk_buffs */ for (i = 0; i < tx_ring->count; i++) iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count; memset(tx_ring->tx_bi, 0, bi_size); /* Zero out the descriptor ring */ memset(tx_ring->desc, 0, tx_ring->size); tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; if (!tx_ring->netdev) return; /* cleanup Tx queue statistics */ netdev_tx_reset_queue(txring_txq(tx_ring)); } /** * iavf_free_tx_resources - Free Tx resources per queue * @tx_ring: Tx descriptor ring for a specific queue * * Free all transmit software resources **/ void iavf_free_tx_resources(struct iavf_ring *tx_ring) { iavf_clean_tx_ring(tx_ring); kfree(tx_ring->tx_bi); tx_ring->tx_bi = NULL; if (tx_ring->desc) { dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; } } /** * iavf_get_tx_pending - how many Tx descriptors not processed * @ring: the ring of descriptors * @in_sw: is tx_pending being checked in SW or HW * * Since there is no access to the ring head register * in XL710, we need to use our local copies **/ static u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw) { u32 head, tail; /* underlying hardware might not allow access and/or always return * 0 for the head/tail registers so just use the cached values */ head = ring->next_to_clean; tail = ring->next_to_use; if (head != tail) return (head < tail) ? 
tail - head : (tail + ring->count - head); return 0; } /** * iavf_force_wb - Issue SW Interrupt so HW does a wb * @vsi: the VSI we care about * @q_vector: the vector on which to force writeback **/ static void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector) { u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK | IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK /* allow 00 to be written to the index */; wr32(&vsi->back->hw, IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val); } /** * iavf_detect_recover_hung - Function to detect and recover hung_queues * @vsi: pointer to vsi struct with tx queues * * VSI has netdev and netdev has TX queues. This function is to check each of * those TX queues if they are hung, trigger recovery by issuing SW interrupt. **/ void iavf_detect_recover_hung(struct iavf_vsi *vsi) { struct iavf_ring *tx_ring = NULL; struct net_device *netdev; unsigned int i; int packets; if (!vsi) return; if (test_bit(__IAVF_VSI_DOWN, vsi->state)) return; netdev = vsi->netdev; if (!netdev) return; if (!netif_carrier_ok(netdev)) return; for (i = 0; i < vsi->back->num_active_queues; i++) { tx_ring = &vsi->back->tx_rings[i]; if (tx_ring && tx_ring->desc) { /* If packet counter has not changed the queue is * likely stalled, so force an interrupt for this * queue. * * prev_pkt_ctr would be negative if there was no * pending work. */ packets = tx_ring->stats.packets & INT_MAX; if (tx_ring->tx_stats.prev_pkt_ctr == packets) { iavf_force_wb(vsi, tx_ring->q_vector); continue; } /* Memory barrier between read of packet count and call * to iavf_get_tx_pending() */ smp_rmb(); tx_ring->tx_stats.prev_pkt_ctr = iavf_get_tx_pending(tx_ring, true) ? packets : -1; } } } #define WB_STRIDE 4 /** * iavf_clean_tx_irq - Reclaim resources after transmit completes * @vsi: the VSI we care about * @tx_ring: Tx ring to clean * @napi_budget: Used to determine if we are in netpoll * * Returns true if there's any budget left (e.g. 
the clean is finished) **/ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi, struct iavf_ring *tx_ring, int napi_budget) { int i = tx_ring->next_to_clean; struct iavf_tx_buffer *tx_buf; struct iavf_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0; unsigned int budget = IAVF_DEFAULT_IRQ_WORK; tx_buf = &tx_ring->tx_bi[i]; tx_desc = IAVF_TX_DESC(tx_ring, i); i -= tx_ring->count; do { struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch; /* if next_to_watch is not set then there is no work pending */ if (!eop_desc) break; /* prevent any other reads prior to eop_desc */ smp_rmb(); iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* if the descriptor isn't done, no work yet to do */ if (!(eop_desc->cmd_type_offset_bsz & cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE))) break; /* clear next_to_watch to prevent false hangs */ tx_buf->next_to_watch = NULL; /* update the statistics for this packet */ total_bytes += tx_buf->bytecount; total_packets += tx_buf->gso_segs; /* free the skb */ napi_consume_skb(tx_buf->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buf, dma), dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); /* clear tx_buffer data */ tx_buf->skb = NULL; dma_unmap_len_set(tx_buf, len, 0); /* unmap remaining buffers */ while (tx_desc != eop_desc) { iavf_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf); tx_buf++; tx_desc++; i++; if (unlikely(!i)) { i -= tx_ring->count; tx_buf = tx_ring->tx_bi; tx_desc = IAVF_TX_DESC(tx_ring, 0); } /* unmap any remaining paged data */ if (dma_unmap_len(tx_buf, len)) { dma_unmap_page(tx_ring->dev, dma_unmap_addr(tx_buf, dma), dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); dma_unmap_len_set(tx_buf, len, 0); } } /* move us one more past the eop_desc for start of next pkt */ tx_buf++; tx_desc++; i++; if (unlikely(!i)) { i -= tx_ring->count; tx_buf = tx_ring->tx_bi; tx_desc = IAVF_TX_DESC(tx_ring, 0); } prefetch(tx_desc); /* update budget accounting */ budget--; } while (likely(budget)); i += tx_ring->count; tx_ring->next_to_clean = i; u64_stats_update_begin(&tx_ring->syncp); tx_ring->stats.bytes += total_bytes; tx_ring->stats.packets += total_packets; u64_stats_update_end(&tx_ring->syncp); tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_packets += total_packets; if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) { /* check to see if there are < 4 descriptors * waiting to be written back, then kick the hardware to force * them to be written back in case we stay in NAPI. * In this mode on X722 we do not enable Interrupt. */ unsigned int j = iavf_get_tx_pending(tx_ring, false); if (budget && ((j / WB_STRIDE) == 0) && (j > 0) && !test_bit(__IAVF_VSI_DOWN, vsi->state) && (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count)) tx_ring->arm_wb = true; } /* notify netdev of completed buffers */ netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. 
*/ smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && !test_bit(__IAVF_VSI_DOWN, vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; } } return !!budget; } /** * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled * @vsi: the VSI we care about * @q_vector: the vector on which to enable writeback * **/ static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector) { u16 flags = q_vector->tx.ring[0].flags; u32 val; if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR)) return; if (q_vector->arm_wb_state) return; val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK | IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */ wr32(&vsi->back->hw, IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val); q_vector->arm_wb_state = true; } static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector, struct iavf_ring_container *rc) { return &q_vector->rx == rc; } #define IAVF_AIM_MULTIPLIER_100G 2560 #define IAVF_AIM_MULTIPLIER_50G 1280 #define IAVF_AIM_MULTIPLIER_40G 1024 #define IAVF_AIM_MULTIPLIER_20G 512 #define IAVF_AIM_MULTIPLIER_10G 256 #define IAVF_AIM_MULTIPLIER_1G 32 static unsigned int iavf_mbps_itr_multiplier(u32 speed_mbps) { switch (speed_mbps) { case SPEED_100000: return IAVF_AIM_MULTIPLIER_100G; case SPEED_50000: return IAVF_AIM_MULTIPLIER_50G; case SPEED_40000: return IAVF_AIM_MULTIPLIER_40G; case SPEED_25000: case SPEED_20000: return IAVF_AIM_MULTIPLIER_20G; case SPEED_10000: default: return IAVF_AIM_MULTIPLIER_10G; case SPEED_1000: case SPEED_100: return IAVF_AIM_MULTIPLIER_1G; } } static unsigned int iavf_virtchnl_itr_multiplier(enum virtchnl_link_speed speed_virtchnl) { switch (speed_virtchnl) { case VIRTCHNL_LINK_SPEED_40GB: return IAVF_AIM_MULTIPLIER_40G; case VIRTCHNL_LINK_SPEED_25GB: case VIRTCHNL_LINK_SPEED_20GB: return IAVF_AIM_MULTIPLIER_20G; case VIRTCHNL_LINK_SPEED_10GB: default: return IAVF_AIM_MULTIPLIER_10G; case VIRTCHNL_LINK_SPEED_1GB: case VIRTCHNL_LINK_SPEED_100MB: return IAVF_AIM_MULTIPLIER_1G; } } static unsigned int iavf_itr_divisor(struct iavf_adapter *adapter) { if (ADV_LINK_SUPPORT(adapter)) return IAVF_ITR_ADAPTIVE_MIN_INC * iavf_mbps_itr_multiplier(adapter->link_speed_mbps); else return IAVF_ITR_ADAPTIVE_MIN_INC * iavf_virtchnl_itr_multiplier(adapter->link_speed); } /** * iavf_update_itr - update the dynamic ITR value based on statistics * @q_vector: structure containing interrupt and ring information * @rc: structure containing ring performance data * * Stores a new ITR value based on packets and byte * counts during the last interrupt. The advantage of per interrupt * computation is faster updates and more accurate ITR for the current * traffic pattern. Constants in this function were computed * based on theoretical maximum wire speed and thresholds were set based * on testing data as well as attempting to minimize response time * while increasing bulk throughput. **/ static void iavf_update_itr(struct iavf_q_vector *q_vector, struct iavf_ring_container *rc) { unsigned int avg_wire_size, packets, bytes, itr; unsigned long next_update = jiffies; /* If we don't have any rings just leave ourselves set for maximum * possible latency so we take ourselves out of the equation. */ if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting)) return; /* For Rx we want to push the delay up and default to low latency. * for Tx we want to pull the delay down and default to high latency. */ itr = iavf_container_is_rx(q_vector, rc) ? 
IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY : IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY; /* If we didn't update within up to 1 - 2 jiffies we can assume * that either packets are coming in so slow there hasn't been * any work, or that there is so much work that NAPI is dealing * with interrupt moderation and we don't need to do anything. */ if (time_after(next_update, rc->next_update)) goto clear_counts; /* If itr_countdown is set it means we programmed an ITR within * the last 4 interrupt cycles. This has a side effect of us * potentially firing an early interrupt. In order to work around * this we need to throw out any data received for a few * interrupts following the update. */ if (q_vector->itr_countdown) { itr = rc->target_itr; goto clear_counts; } packets = rc->total_packets; bytes = rc->total_bytes; if (iavf_container_is_rx(q_vector, rc)) { /* If Rx there are 1 to 4 packets and bytes are less than * 9000 assume insufficient data to use bulk rate limiting * approach unless Tx is already in bulk rate limiting. We * are likely latency driven. */ if (packets && packets < 4 && bytes < 9000 && (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) { itr = IAVF_ITR_ADAPTIVE_LATENCY; goto adjust_by_size; } } else if (packets < 4) { /* If we have Tx and Rx ITR maxed and Tx ITR is running in * bulk mode and we are receiving 4 or fewer packets just * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so * that the Rx can relax. */ if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS && (q_vector->rx.target_itr & IAVF_ITR_MASK) == IAVF_ITR_ADAPTIVE_MAX_USECS) goto clear_counts; } else if (packets > 32) { /* If we have processed over 32 packets in a single interrupt * for Tx assume we need to switch over to "bulk" mode. */ rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY; } /* We have no packets to actually measure against. This means * either one of the other queues on this vector is active or * we are a Tx queue doing TSO with too high of an interrupt rate. * * Between 4 and 56 we can assume that our current interrupt delay * is only slightly too low. As such we should increase it by a small * fixed amount. */ if (packets < 56) { itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC; if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) { itr &= IAVF_ITR_ADAPTIVE_LATENCY; itr += IAVF_ITR_ADAPTIVE_MAX_USECS; } goto clear_counts; } if (packets <= 256) { itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); itr &= IAVF_ITR_MASK; /* Between 56 and 112 is our "goldilocks" zone where we are * working out "just right". Just report that our current * ITR is good for us. */ if (packets <= 112) goto clear_counts; /* If packet count is 128 or greater we are likely looking * at a slight overrun of the delay we want. Try halving * our delay to see if that will cut the number of packets * in half per interrupt. */ itr /= 2; itr &= IAVF_ITR_MASK; if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS) itr = IAVF_ITR_ADAPTIVE_MIN_USECS; goto clear_counts; } /* The paths below assume we are dealing with a bulk ITR since * number of packets is greater than 256. We are just going to have * to compute a value and try to bring the count under control, * though for smaller packet sizes there isn't much we can do as * NAPI polling will likely be kicking in sooner rather than later. */ itr = IAVF_ITR_ADAPTIVE_BULK; adjust_by_size: /* If packet counts are 256 or greater we can assume we have a gross * overestimation of what the rate should be. 
Instead of trying to fine * tune it just use the formula below to try and dial in an exact value * give the current packet size of the frame. */ avg_wire_size = bytes / packets; /* The following is a crude approximation of: * wmem_default / (size + overhead) = desired_pkts_per_int * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value * * Assuming wmem_default is 212992 and overhead is 640 bytes per * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the * formula down to * * (170 * (size + 24)) / (size + 640) = ITR * * We first do some math on the packet size and then finally bitshift * by 8 after rounding up. We also have to account for PCIe link speed * difference as ITR scales based on this. */ if (avg_wire_size <= 60) { /* Start at 250k ints/sec */ avg_wire_size = 4096; } else if (avg_wire_size <= 380) { /* 250K ints/sec to 60K ints/sec */ avg_wire_size *= 40; avg_wire_size += 1696; } else if (avg_wire_size <= 1084) { /* 60K ints/sec to 36K ints/sec */ avg_wire_size *= 15; avg_wire_size += 11452; } else if (avg_wire_size <= 1980) { /* 36K ints/sec to 30K ints/sec */ avg_wire_size *= 5; avg_wire_size += 22420; } else { /* plateau at a limit of 30K ints/sec */ avg_wire_size = 32256; } /* If we are in low latency mode halve our delay which doubles the * rate to somewhere between 100K to 16K ints/sec */ if (itr & IAVF_ITR_ADAPTIVE_LATENCY) avg_wire_size /= 2; /* Resultant value is 256 times larger than it needs to be. This * gives us room to adjust the value as needed to either increase * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. * * Use addition as we have already recorded the new latency flag * for the ITR value. */ itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector->adapter)) * IAVF_ITR_ADAPTIVE_MIN_INC; if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) { itr &= IAVF_ITR_ADAPTIVE_LATENCY; itr += IAVF_ITR_ADAPTIVE_MAX_USECS; } clear_counts: /* write back value */ rc->target_itr = itr; /* next update should occur within next jiffy */ rc->next_update = next_update + 1; rc->total_bytes = 0; rc->total_packets = 0; } /** * iavf_setup_tx_descriptors - Allocate the Tx descriptors * @tx_ring: the tx ring to set up * * Return 0 on success, negative on error **/ int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring) { struct device *dev = tx_ring->dev; int bi_size; if (!dev) return -ENOMEM; /* warn if we are about to overwrite the pointer */ WARN_ON(tx_ring->tx_bi); bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count; tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); if (!tx_ring->tx_bi) goto err; /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) { dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", tx_ring->size); goto err; } tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; tx_ring->tx_stats.prev_pkt_ctr = -1; return 0; err: kfree(tx_ring->tx_bi); tx_ring->tx_bi = NULL; return -ENOMEM; } /** * iavf_clean_rx_ring - Free Rx buffers * @rx_ring: ring to be cleaned **/ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring) { unsigned long bi_size; u16 i; /* ring already cleared, nothing to do */ if (!rx_ring->rx_bi) return; if (rx_ring->skb) { dev_kfree_skb(rx_ring->skb); rx_ring->skb = NULL; } /* Free all the Rx ring sk_buffs */ for (i = 0; i < 
rx_ring->count; i++) { struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; if (!rx_bi->page) continue; /* Invalidate cache lines that may have been written to by * device so that we avoid corrupting memory. */ dma_sync_single_range_for_cpu(rx_ring->dev, rx_bi->dma, rx_bi->page_offset, rx_ring->rx_buf_len, DMA_FROM_DEVICE); /* free resources associated with mapping */ dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, iavf_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR); __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); rx_bi->page = NULL; rx_bi->page_offset = 0; } bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; memset(rx_ring->rx_bi, 0, bi_size); /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; } /** * iavf_free_rx_resources - Free Rx resources * @rx_ring: ring to clean the resources from * * Free all receive software resources **/ void iavf_free_rx_resources(struct iavf_ring *rx_ring) { iavf_clean_rx_ring(rx_ring); kfree(rx_ring->rx_bi); rx_ring->rx_bi = NULL; if (rx_ring->desc) { dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } } /** * iavf_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring) { struct device *dev = rx_ring->dev; int bi_size; /* warn if we are about to overwrite the pointer */ WARN_ON(rx_ring->rx_bi); bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); if (!rx_ring->rx_bi) goto err; u64_stats_init(&rx_ring->syncp); /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) { dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", rx_ring->size); goto err; } rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; return 0; err: kfree(rx_ring->rx_bi); rx_ring->rx_bi = NULL; return -ENOMEM; } /** * iavf_release_rx_desc - Store the new tail and head values * @rx_ring: ring to bump * @val: new head index **/ static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; /* update next to alloc since we have filled the ring */ rx_ring->next_to_alloc = val; /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). */ wmb(); writel(val, rx_ring->tail); } /** * iavf_rx_offset - Return expected offset into page to access data * @rx_ring: Ring we are requesting offset of * * Returns the offset value for ring into the data buffer. */ static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring) { return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0; } /** * iavf_alloc_mapped_page - recycle or make a new page * @rx_ring: ring to use * @bi: rx_buffer struct to modify * * Returns true if the page was successfully allocated or * reused. 
**/ static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring, struct iavf_rx_buffer *bi) { struct page *page = bi->page; dma_addr_t dma; /* since we are recycling buffers we should seldom need to alloc */ if (likely(page)) { rx_ring->rx_stats.page_reuse_count++; return true; } /* alloc new page for storage */ page = dev_alloc_pages(iavf_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_page_failed++; return false; } /* map page for use */ dma = dma_map_page_attrs(rx_ring->dev, page, 0, iavf_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use */ if (dma_mapping_error(rx_ring->dev, dma)) { __free_pages(page, iavf_rx_pg_order(rx_ring)); rx_ring->rx_stats.alloc_page_failed++; return false; } bi->dma = dma; bi->page = page; bi->page_offset = iavf_rx_offset(rx_ring); /* initialize pagecnt_bias to 1 representing we fully own page */ bi->pagecnt_bias = 1; return true; } /** * iavf_receive_skb - Send a completed packet up the stack * @rx_ring: rx ring in play * @skb: packet to send up * @vlan_tag: vlan tag for packet **/ static void iavf_receive_skb(struct iavf_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) { struct iavf_q_vector *q_vector = rx_ring->q_vector; if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && (vlan_tag & VLAN_VID_MASK)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) && vlan_tag & VLAN_VID_MASK) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag); napi_gro_receive(&q_vector->napi, skb); } /** * iavf_alloc_rx_buffers - Replace used receive buffers * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace * * Returns false if all allocations were successful, true if any fail **/ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count) { u16 ntu = rx_ring->next_to_use; union iavf_rx_desc *rx_desc; struct iavf_rx_buffer *bi; /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) return false; rx_desc = IAVF_RX_DESC(rx_ring, ntu); bi = &rx_ring->rx_bi[ntu]; do { if (!iavf_alloc_mapped_page(rx_ring, bi)) goto no_buffers; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, bi->dma, bi->page_offset, rx_ring->rx_buf_len, DMA_FROM_DEVICE); /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. 
*/ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); rx_desc++; bi++; ntu++; if (unlikely(ntu == rx_ring->count)) { rx_desc = IAVF_RX_DESC(rx_ring, 0); bi = rx_ring->rx_bi; ntu = 0; } /* clear the status bits for the next_to_use descriptor */ rx_desc->wb.qword1.status_error_len = 0; cleaned_count--; } while (cleaned_count); if (rx_ring->next_to_use != ntu) iavf_release_rx_desc(rx_ring, ntu); return false; no_buffers: if (rx_ring->next_to_use != ntu) iavf_release_rx_desc(rx_ring, ntu); /* make sure to come back via polling to try again after * allocation failure */ return true; } /** * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum * @vsi: the VSI we care about * @skb: skb currently being received and modified * @rx_desc: the receive descriptor **/ static inline void iavf_rx_checksum(struct iavf_vsi *vsi, struct sk_buff *skb, union iavf_rx_desc *rx_desc) { struct iavf_rx_ptype_decoded decoded; u32 rx_error, rx_status; bool ipv4, ipv6; u8 ptype; u64 qword; qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT; rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >> IAVF_RXD_QW1_ERROR_SHIFT; rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT; decoded = decode_rx_desc_ptype(ptype); skb->ip_summed = CHECKSUM_NONE; skb_checksum_none_assert(skb); /* Rx csum enabled and ip headers found? */ if (!(vsi->netdev->features & NETIF_F_RXCSUM)) return; /* did the hardware decode the packet and checksum? */ if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT))) return; /* both known and outer_ip must be set for the below code to work */ if (!(decoded.known && decoded.outer_ip)) return; ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) && (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4); ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) && (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6); if (ipv4 && (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) | BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT)))) goto checksum_fail; /* likely incorrect csum if alternate IP extension headers found */ if (ipv6 && rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT)) /* don't increment checksum err here, non-fatal err */ return; /* there was some L4 error, count error and punt packet to the stack */ if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT)) goto checksum_fail; /* handle packets that were not able to be checksummed due * to arrival speed, in this case the stack can compute * the csum. 
*/ if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT)) return; /* Only report checksum unnecessary for TCP, UDP, or SCTP */ switch (decoded.inner_prot) { case IAVF_RX_PTYPE_INNER_PROT_TCP: case IAVF_RX_PTYPE_INNER_PROT_UDP: case IAVF_RX_PTYPE_INNER_PROT_SCTP: skb->ip_summed = CHECKSUM_UNNECESSARY; fallthrough; default: break; } return; checksum_fail: vsi->back->hw_csum_rx_error++; } /** * iavf_ptype_to_htype - get a hash type * @ptype: the ptype value from the descriptor * * Returns a hash type to be used by skb_set_hash **/ static inline int iavf_ptype_to_htype(u8 ptype) { struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); if (!decoded.known) return PKT_HASH_TYPE_NONE; if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP && decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4) return PKT_HASH_TYPE_L4; else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP && decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3) return PKT_HASH_TYPE_L3; else return PKT_HASH_TYPE_L2; } /** * iavf_rx_hash - set the hash value in the skb * @ring: descriptor ring * @rx_desc: specific descriptor * @skb: skb currently being received and modified * @rx_ptype: Rx packet type **/ static inline void iavf_rx_hash(struct iavf_ring *ring, union iavf_rx_desc *rx_desc, struct sk_buff *skb, u8 rx_ptype) { u32 hash; const __le64 rss_mask = cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH << IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT); if (!(ring->netdev->features & NETIF_F_RXHASH)) return; if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype)); } } /** * iavf_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated * @rx_ptype: the packet type decoded by hardware * * This function checks the ring, descriptor, and packet information in * order to populate the hash, checksum, VLAN, protocol, and * other fields within the skb. **/ static inline void iavf_process_skb_fields(struct iavf_ring *rx_ring, union iavf_rx_desc *rx_desc, struct sk_buff *skb, u8 rx_ptype) { iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype); iavf_rx_checksum(rx_ring->vsi, skb, rx_desc); skb_record_rx_queue(skb, rx_ring->queue_index); /* modifies the skb - consumes the enet header */ skb->protocol = eth_type_trans(skb, rx_ring->netdev); } /** * iavf_cleanup_headers - Correct empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @skb: pointer to current skb being fixed * * Also address the case where we are pulling data in on pages only * and as such no data is present in the skb header. * * In addition if skb is not at least 60 bytes we need to pad it so that * it is large enough to qualify as a valid Ethernet frame. * * Returns true if an error was encountered and skb was freed. 
**/ static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb) { /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; return false; } /** * iavf_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: rx descriptor ring to store buffers on * @old_buff: donor buffer to have page reused * * Synchronizes page for reuse by the adapter **/ static void iavf_reuse_rx_page(struct iavf_ring *rx_ring, struct iavf_rx_buffer *old_buff) { struct iavf_rx_buffer *new_buff; u16 nta = rx_ring->next_to_alloc; new_buff = &rx_ring->rx_bi[nta]; /* update, and store next to alloc */ nta++; rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; /* transfer page from old buffer to new buffer */ new_buff->dma = old_buff->dma; new_buff->page = old_buff->page; new_buff->page_offset = old_buff->page_offset; new_buff->pagecnt_bias = old_buff->pagecnt_bias; } /** * iavf_can_reuse_rx_page - Determine if this page can be reused by * the adapter for another receive * * @rx_buffer: buffer containing the page * * If page is reusable, rx_buffer->page_offset is adjusted to point to * an unused region in the page. * * For small pages, @truesize will be a constant value, half the size * of the memory at page. We'll attempt to alternate between high and * low halves of the page, with one half ready for use by the hardware * and the other half being consumed by the stack. We use the page * ref count to determine whether the stack has finished consuming the * portion of this page that was passed up with a previous packet. If * the page ref count is >1, we'll assume the "other" half page is * still busy, and this page cannot be reused. * * For larger pages, @truesize will be the actual space used by the * received packet (adjusted upward to an even multiple of the cache * line size). This will advance through the page by the amount * actually consumed by the received packets while there is still * space for a buffer. Each region of larger pages will be used at * most once, after which the page will not be reused. * * In either case, if the page is reusable its refcount is increased. **/ static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer) { unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; struct page *page = rx_buffer->page; /* Is any reuse possible? */ if (!dev_page_is_reusable(page)) return false; #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ if (unlikely((page_count(page) - pagecnt_bias) > 1)) return false; #else #define IAVF_LAST_OFFSET \ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048) if (rx_buffer->page_offset > IAVF_LAST_OFFSET) return false; #endif /* If we have drained the page fragment pool we need to update * the pagecnt_bias and page count so that we fully restock the * number of references the driver holds. */ if (unlikely(!pagecnt_bias)) { page_ref_add(page, USHRT_MAX); rx_buffer->pagecnt_bias = USHRT_MAX; } return true; } /** * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add * @skb: sk_buff to place the data into * @size: packet length from rx_desc * * This function will add the data contained in rx_buffer->page to the skb. * It will just attach the page as a frag to the skb. * * The function will then update the page offset. 
**/ static void iavf_add_rx_frag(struct iavf_ring *rx_ring, struct iavf_rx_buffer *rx_buffer, struct sk_buff *skb, unsigned int size) { #if (PAGE_SIZE < 8192) unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring)); #endif if (!size) return; skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, size, truesize); /* page is being used so we must update the page offset */ #if (PAGE_SIZE < 8192) rx_buffer->page_offset ^= truesize; #else rx_buffer->page_offset += truesize; #endif } /** * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use * @rx_ring: rx descriptor ring to transact packets on * @size: size of buffer to add to skb * * This function will pull an Rx buffer from the ring and synchronize it * for use by the CPU. */ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, const unsigned int size) { struct iavf_rx_buffer *rx_buffer; rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; prefetchw(rx_buffer->page); if (!size) return rx_buffer; /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, size, DMA_FROM_DEVICE); /* We have pulled a buffer for use, so decrement pagecnt_bias */ rx_buffer->pagecnt_bias--; return rx_buffer; } /** * iavf_construct_skb - Allocate skb and populate it * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: rx buffer to pull data from * @size: size of buffer to add to skb * * This function allocates an skb. It then populates it with the page * data from the current receive descriptor, taking care to set up the * skb correctly. */ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, struct iavf_rx_buffer *rx_buffer, unsigned int size) { void *va; #if (PAGE_SIZE < 8192) unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(size); #endif unsigned int headlen; struct sk_buff *skb; if (!rx_buffer) return NULL; /* prefetch first cache line of first page */ va = page_address(rx_buffer->page) + rx_buffer->page_offset; net_prefetch(va); /* allocate a skb to store the frags */ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, IAVF_RX_HDR_SIZE, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) return NULL; /* Determine available headroom for copy */ headlen = size; if (headlen > IAVF_RX_HDR_SIZE) headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); /* update all of the pointers */ size -= headlen; if (size) { skb_add_rx_frag(skb, 0, rx_buffer->page, rx_buffer->page_offset + headlen, size, truesize); /* buffer is used by skb, update page_offset */ #if (PAGE_SIZE < 8192) rx_buffer->page_offset ^= truesize; #else rx_buffer->page_offset += truesize; #endif } else { /* buffer is unused, reset bias back to rx_buffer */ rx_buffer->pagecnt_bias++; } return skb; } /** * iavf_build_skb - Build skb around an existing buffer * @rx_ring: Rx descriptor ring to transact packets on * @rx_buffer: Rx buffer to pull data from * @size: size of buffer to add to skb * * This function builds an skb around an existing Rx buffer, taking care * to set up the skb correctly and avoid any memcpy overhead. 
*/ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, struct iavf_rx_buffer *rx_buffer, unsigned int size) { void *va; #if (PAGE_SIZE < 8192) unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + SKB_DATA_ALIGN(IAVF_SKB_PAD + size); #endif struct sk_buff *skb; if (!rx_buffer || !size) return NULL; /* prefetch first cache line of first page */ va = page_address(rx_buffer->page) + rx_buffer->page_offset; net_prefetch(va); /* build an skb around the page buffer */ skb = napi_build_skb(va - IAVF_SKB_PAD, truesize); if (unlikely(!skb)) return NULL; /* update pointers within the skb to store the data */ skb_reserve(skb, IAVF_SKB_PAD); __skb_put(skb, size); /* buffer is used by skb, update page_offset */ #if (PAGE_SIZE < 8192) rx_buffer->page_offset ^= truesize; #else rx_buffer->page_offset += truesize; #endif return skb; } /** * iavf_put_rx_buffer - Clean up used buffer and either recycle or free * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: rx buffer to pull data from * * This function will clean up the contents of the rx_buffer. It will * either recycle the buffer or unmap it and free the associated resources. */ static void iavf_put_rx_buffer(struct iavf_ring *rx_ring, struct iavf_rx_buffer *rx_buffer) { if (!rx_buffer) return; if (iavf_can_reuse_rx_page(rx_buffer)) { /* hand second half of page back to the ring */ iavf_reuse_rx_page(rx_ring, rx_buffer); rx_ring->rx_stats.page_reuse_count++; } else { /* we are not reusing the buffer so unmap it */ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, iavf_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR); __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); } /* clear contents of buffer_info */ rx_buffer->page = NULL; } /** * iavf_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed * @rx_desc: Rx descriptor for current buffer * @skb: Current socket buffer containing buffer in progress * * This function updates next to clean. If the buffer is an EOP buffer * this function exits returning false, otherwise it will place the * sk_buff in the next buffer to be chained and return true indicating * that this is in fact a non-EOP buffer. **/ static bool iavf_is_non_eop(struct iavf_ring *rx_ring, union iavf_rx_desc *rx_desc, struct sk_buff *skb) { u32 ntc = rx_ring->next_to_clean + 1; /* fetch, update, and store next to clean */ ntc = (ntc < rx_ring->count) ? ntc : 0; rx_ring->next_to_clean = ntc; prefetch(IAVF_RX_DESC(rx_ring, ntc)); /* if we are the last buffer then there is nothing else to do */ #define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT) if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF))) return false; rx_ring->rx_stats.non_eop_descs++; return true; } /** * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process * * This function provides a "bounce buffer" approach to Rx interrupt * processing. The advantage to this is that on systems that have * expensive overhead for IOMMU access this provides a means of avoiding * it by maintaining the mapping of the page to the system. 
* * Returns amount of work completed **/ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring); bool failure = false; while (likely(total_rx_packets < (unsigned int)budget)) { struct iavf_rx_buffer *rx_buffer; union iavf_rx_desc *rx_desc; unsigned int size; u16 vlan_tag = 0; u8 rx_ptype; u64 qword; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IAVF_RX_BUFFER_WRITE) { failure = failure || iavf_alloc_rx_buffers(rx_ring, cleaned_count); cleaned_count = 0; } rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean); /* status_error_len will always be zero for unused descriptors * because it's cleared in cleanup, and overlaps with hdr_addr * which is always zero because packet split isn't used, if the * hardware wrote DD then the length will be non-zero */ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); /* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc until we have * verified the descriptor has been written back. */ dma_rmb(); #define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT) if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD)) break; size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >> IAVF_RXD_QW1_LENGTH_PBUF_SHIFT; iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb); rx_buffer = iavf_get_rx_buffer(rx_ring, size); /* retrieve a buffer from the ring */ if (skb) iavf_add_rx_frag(rx_ring, rx_buffer, skb, size); else if (ring_uses_build_skb(rx_ring)) skb = iavf_build_skb(rx_ring, rx_buffer, size); else skb = iavf_construct_skb(rx_ring, rx_buffer, size); /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; if (rx_buffer && size) rx_buffer->pagecnt_bias++; break; } iavf_put_rx_buffer(rx_ring, rx_buffer); cleaned_count++; if (iavf_is_non_eop(rx_ring, rx_desc, skb)) continue; /* ERR_MASK will only have valid bits if EOP set, and * what we are doing here is actually checking * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in * the error field */ if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) { dev_kfree_skb_any(skb); skb = NULL; continue; } if (iavf_cleanup_headers(rx_ring, skb)) { skb = NULL; continue; } /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT; /* populate checksum, VLAN, and protocol */ iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) && rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1); if (rx_desc->wb.qword2.ext_status & cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) && rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2); iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); iavf_receive_skb(rx_ring, skb, vlan_tag); skb = NULL; /* update budget accounting */ total_rx_packets++; } rx_ring->skb = skb; u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; u64_stats_update_end(&rx_ring->syncp); rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; /* guarantee a trip back through this routine if there was a failure */ 
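/* Returning the full budget when an allocation failed makes
 * iavf_napi_poll() see "cleaned >= budget_per_ring", so clean_complete
 * stays false and NAPI polls this ring again to retry the allocation;
 * returning the packet count instead allows napi_complete_done() to run
 * and interrupts to be re-enabled.
 */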
return failure ? budget : (int)total_rx_packets; } static inline u32 iavf_buildreg_itr(const int type, u16 itr) { u32 val; /* We don't bother with setting the CLEARPBA bit as the data sheet * points out doing so is "meaningless since it was already * auto-cleared". The auto-clearing happens when the interrupt is * asserted. * * Hardware errata 28 for also indicates that writing to a * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear * an event in the PBA anyway so we need to rely on the automask * to hold pending events for us until the interrupt is re-enabled * * The itr value is reported in microseconds, and the register * value is recorded in 2 microsecond units. For this reason we * only need to shift by the interval shift - 1 instead of the * full value. */ itr &= IAVF_ITR_MASK; val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK | (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1)); return val; } /* a small macro to shorten up some long lines */ #define INTREG IAVF_VFINT_DYN_CTLN1 /* The act of updating the ITR will cause it to immediately trigger. In order * to prevent this from throwing off adaptive update statistics we defer the * update so that it can only happen so often. So after either Tx or Rx are * updated we make the adaptive scheme wait until either the ITR completely * expires via the next_update expiration or we have been through at least * 3 interrupts. */ #define ITR_COUNTDOWN_START 3 /** * iavf_update_enable_itr - Update itr and re-enable MSIX interrupt * @vsi: the VSI we care about * @q_vector: q_vector for which itr is being updated and interrupt enabled * **/ static inline void iavf_update_enable_itr(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector) { struct iavf_hw *hw = &vsi->back->hw; u32 intval; /* These will do nothing if dynamic updates are not enabled */ iavf_update_itr(q_vector, &q_vector->tx); iavf_update_itr(q_vector, &q_vector->rx); /* This block of logic allows us to get away with only updating * one ITR value with each interrupt. The idea is to perform a * pseudo-lazy update with the following criteria. * * 1. Rx is given higher priority than Tx if both are in same state * 2. If we must reduce an ITR that is given highest priority. * 3. We then give priority to increasing ITR based on amount. 
*/ if (q_vector->rx.target_itr < q_vector->rx.current_itr) { /* Rx ITR needs to be reduced, this is highest priority */ intval = iavf_buildreg_itr(IAVF_RX_ITR, q_vector->rx.target_itr); q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || ((q_vector->rx.target_itr - q_vector->rx.current_itr) < (q_vector->tx.target_itr - q_vector->tx.current_itr))) { /* Tx ITR needs to be reduced, this is second priority * Tx ITR needs to be increased more than Rx, fourth priority */ intval = iavf_buildreg_itr(IAVF_TX_ITR, q_vector->tx.target_itr); q_vector->tx.current_itr = q_vector->tx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { /* Rx ITR needs to be increased, third priority */ intval = iavf_buildreg_itr(IAVF_RX_ITR, q_vector->rx.target_itr); q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; } else { /* No ITR update, lowest priority */ intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0); if (q_vector->itr_countdown) q_vector->itr_countdown--; } if (!test_bit(__IAVF_VSI_DOWN, vsi->state)) wr32(hw, INTREG(q_vector->reg_idx), intval); } /** * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine * @napi: napi struct with our devices info in it * @budget: amount of work driver is allowed to do this pass, in packets * * This function will clean all queues associated with a q_vector. * * Returns the amount of work done **/ int iavf_napi_poll(struct napi_struct *napi, int budget) { struct iavf_q_vector *q_vector = container_of(napi, struct iavf_q_vector, napi); struct iavf_vsi *vsi = q_vector->vsi; struct iavf_ring *ring; bool clean_complete = true; bool arm_wb = false; int budget_per_ring; int work_done = 0; if (test_bit(__IAVF_VSI_DOWN, vsi->state)) { napi_complete(napi); return 0; } /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ iavf_for_each_ring(ring, q_vector->tx) { if (!iavf_clean_tx_irq(vsi, ring, budget)) { clean_complete = false; continue; } arm_wb |= ring->arm_wb; ring->arm_wb = false; } /* Handle case where we are called by netpoll with a budget of 0 */ if (budget <= 0) goto tx_only; /* We attempt to distribute budget to each Rx queue fairly, but don't * allow the budget to go below 1 because that would exit polling early. */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); iavf_for_each_ring(ring, q_vector->rx) { int cleaned = iavf_clean_rx_irq(ring, budget_per_ring); work_done += cleaned; /* if we clean as many as budgeted, we must not be done */ if (cleaned >= budget_per_ring) clean_complete = false; } /* If work not completed, return budget and polling will return */ if (!clean_complete) { int cpu_id = smp_processor_id(); /* It is possible that the interrupt affinity has changed but, * if the cpu is pegged at 100%, polling will never exit while * traffic continues and the interrupt will be stuck on this * cpu. We check to make sure affinity is correct before we * continue to poll, otherwise we must stop polling so the * interrupt can move to the correct cpu. 
*/ if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { /* Tell napi that we are done polling */ napi_complete_done(napi, work_done); /* Force an interrupt */ iavf_force_wb(vsi, q_vector); /* Return budget-1 so that polling stops */ return budget - 1; } tx_only: if (arm_wb) { q_vector->tx.ring[0].tx_stats.tx_force_wb++; iavf_enable_wb_on_itr(vsi, q_vector); } return budget; } if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR) q_vector->arm_wb_state = false; /* Exit the polling mode, but don't re-enable interrupts if stack might * poll us due to busy-polling */ if (likely(napi_complete_done(napi, work_done))) iavf_update_enable_itr(vsi, q_vector); return min_t(int, work_done, budget - 1); } /** * iavf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW * @skb: send buffer * @tx_ring: ring to send buffer on * @flags: the tx flags to be set * * Checks the skb and set up correspondingly several generic transmit flags * related to VLAN tagging for the HW, such as VLAN, DCB, etc. * * Returns error code indicate the frame should be dropped upon error and the * otherwise returns 0 to indicate the flags has been set properly. **/ static void iavf_tx_prepare_vlan_flags(struct sk_buff *skb, struct iavf_ring *tx_ring, u32 *flags) { u32 tx_flags = 0; /* stack will only request hardware VLAN insertion offload for protocols * that the driver supports and has enabled */ if (!skb_vlan_tag_present(skb)) return; tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT; if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) { tx_flags |= IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN; } else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) { tx_flags |= IAVF_TX_FLAGS_HW_VLAN; } else { dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n"); return; } *flags = tx_flags; } /** * iavf_tso - set up the tso context descriptor * @first: pointer to first Tx buffer for xmit * @hdr_len: ptr to the size of the packet header * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { struct sk_buff *skb = first->skb; u64 cd_cmd, cd_tso_len, cd_mss; union { struct iphdr *v4; struct ipv6hdr *v6; unsigned char *hdr; } ip; union { struct tcphdr *tcp; struct udphdr *udp; unsigned char *hdr; } l4; u32 paylen, l4_offset; u16 gso_segs, gso_size; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; if (!skb_is_gso(skb)) return 0; err = skb_cow_head(skb, 0); if (err < 0) return err; ip.hdr = skb_network_header(skb); l4.hdr = skb_transport_header(skb); /* initialize outer IP header fields */ if (ip.v4->version == 4) { ip.v4->tot_len = 0; ip.v4->check = 0; } else { ip.v6->payload_len = 0; } if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | SKB_GSO_GRE_CSUM | SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6 | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { l4.udp->len = 0; /* determine offset of outer transport header */ l4_offset = l4.hdr - skb->data; /* remove payload length from outer checksum */ paylen = skb->len - l4_offset; csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen)); } /* reset pointers to inner headers */ ip.hdr = skb_inner_network_header(skb); l4.hdr = skb_inner_transport_header(skb); /* initialize inner IP header fields */ if (ip.v4->version == 4) { ip.v4->tot_len = 0; ip.v4->check = 0; } else { ip.v6->payload_len = 0; } } /* 
determine offset of inner transport header */ l4_offset = l4.hdr - skb->data; /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen)); /* compute length of UDP segmentation header */ *hdr_len = (u8)sizeof(l4.udp) + l4_offset; } else { csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); /* compute length of TCP segmentation header */ *hdr_len = (u8)((l4.tcp->doff * 4) + l4_offset); } /* pull values out of skb_shinfo */ gso_size = skb_shinfo(skb)->gso_size; gso_segs = skb_shinfo(skb)->gso_segs; /* update GSO size and bytecount with header size */ first->gso_segs = gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; /* find the field values */ cd_cmd = IAVF_TX_CTX_DESC_TSO; cd_tso_len = skb->len - *hdr_len; cd_mss = gso_size; *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) | (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) | (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT); return 1; } /** * iavf_tx_enable_csum - Enable Tx checksum offloads * @skb: send buffer * @tx_flags: pointer to Tx flags currently set * @td_cmd: Tx descriptor command bits to set * @td_offset: Tx descriptor header offsets to set * @tx_ring: Tx descriptor ring * @cd_tunneling: ptr to context desc bits **/ static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, u32 *td_cmd, u32 *td_offset, struct iavf_ring *tx_ring, u32 *cd_tunneling) { union { struct iphdr *v4; struct ipv6hdr *v6; unsigned char *hdr; } ip; union { struct tcphdr *tcp; struct udphdr *udp; unsigned char *hdr; } l4; unsigned char *exthdr; u32 offset, cmd = 0; __be16 frag_off; u8 l4_proto = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; ip.hdr = skb_network_header(skb); l4.hdr = skb_transport_header(skb); /* compute outer L2 header size */ offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT; if (skb->encapsulation) { u32 tunnel = 0; /* define outer network header type */ if (*tx_flags & IAVF_TX_FLAGS_IPV4) { tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ? 
IAVF_TX_CTX_EXT_IP_IPV4 : IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM; l4_proto = ip.v4->protocol; } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) { tunnel |= IAVF_TX_CTX_EXT_IP_IPV6; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; if (l4.hdr != exthdr) ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off); } /* define outer transport */ switch (l4_proto) { case IPPROTO_UDP: tunnel |= IAVF_TXD_CTX_UDP_TUNNELING; *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; break; case IPPROTO_GRE: tunnel |= IAVF_TXD_CTX_GRE_TUNNELING; *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; break; case IPPROTO_IPIP: case IPPROTO_IPV6: *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; l4.hdr = skb_inner_network_header(skb); break; default: if (*tx_flags & IAVF_TX_FLAGS_TSO) return -1; skb_checksum_help(skb); return 0; } /* compute outer L3 header size */ tunnel |= ((l4.hdr - ip.hdr) / 4) << IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT; /* switch IP header pointer from outer to inner header */ ip.hdr = skb_inner_network_header(skb); /* compute tunnel header size */ tunnel |= ((ip.hdr - l4.hdr) / 2) << IAVF_TXD_CTX_QW0_NATLEN_SHIFT; /* indicate if we need to offload outer UDP header */ if ((*tx_flags & IAVF_TX_FLAGS_TSO) && !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK; /* record tunnel offload values */ *cd_tunneling |= tunnel; /* switch L4 header pointer from outer to inner */ l4.hdr = skb_inner_transport_header(skb); l4_proto = 0; /* reset type as we transition from outer to inner headers */ *tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6); if (ip.v4->version == 4) *tx_flags |= IAVF_TX_FLAGS_IPV4; if (ip.v6->version == 6) *tx_flags |= IAVF_TX_FLAGS_IPV6; } /* Enable IP checksum offloads */ if (*tx_flags & IAVF_TX_FLAGS_IPV4) { l4_proto = ip.v4->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ? 
IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM : IAVF_TX_DESC_CMD_IIPT_IPV4; } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) { cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; if (l4.hdr != exthdr) ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off); } /* compute inner L3 header size */ offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; /* Enable L4 checksum offloads */ switch (l4_proto) { case IPPROTO_TCP: /* enable checksum offloads */ cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP; offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_SCTP: /* enable SCTP checksum offload */ cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP; offset |= (sizeof(struct sctphdr) >> 2) << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_UDP: /* enable UDP checksum offload */ cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP; offset |= (sizeof(struct udphdr) >> 2) << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; default: if (*tx_flags & IAVF_TX_FLAGS_TSO) return -1; skb_checksum_help(skb); return 0; } *td_cmd |= cmd; *td_offset |= offset; return 1; } /** * iavf_create_tx_ctx - Build the Tx context descriptor * @tx_ring: ring to create the descriptor on * @cd_type_cmd_tso_mss: Quad Word 1 * @cd_tunneling: Quad Word 0 - bits 0-31 * @cd_l2tag2: Quad Word 0 - bits 32-63 **/ static void iavf_create_tx_ctx(struct iavf_ring *tx_ring, const u64 cd_type_cmd_tso_mss, const u32 cd_tunneling, const u32 cd_l2tag2) { struct iavf_tx_context_desc *context_desc; int i = tx_ring->next_to_use; if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) && !cd_tunneling && !cd_l2tag2) return; /* grab the next descriptor */ context_desc = IAVF_TX_CTXTDESC(tx_ring, i); i++; tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; /* cpu_to_le32 and assign to struct fields */ context_desc->tunneling_params = cpu_to_le32(cd_tunneling); context_desc->l2tag2 = cpu_to_le16(cd_l2tag2); context_desc->rsvd = cpu_to_le16(0); context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); } /** * __iavf_chk_linearize - Check if there are more than 8 buffers per packet * @skb: send buffer * * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire * and so we need to figure out the cases where we need to linearize the skb. * * For TSO we need to count the TSO header and segment payload separately. * As such we need to check cases where we have 7 fragments or more as we * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for * the segment payload in the first descriptor, and another 7 for the * fragments. **/ bool __iavf_chk_linearize(struct sk_buff *skb) { const skb_frag_t *frag, *stale; int nr_frags, sum; /* no need to check if number of frags is less than 7 */ nr_frags = skb_shinfo(skb)->nr_frags; if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1)) return false; /* We need to walk through the list and validate that each group * of 6 fragments totals at least gso_size. */ nr_frags -= IAVF_MAX_BUFFER_TXD - 2; frag = &skb_shinfo(skb)->frags[0]; /* Initialize size to the negative value of gso_size minus 1. We * use this as the worst case scenerio in which the frag ahead * of us only provides one byte which is why we are limited to 6 * descriptors for a single transmit as the header and previous * fragment are already consuming 2 descriptors. 
*/ sum = 1 - skb_shinfo(skb)->gso_size; /* Add size of frags 0 through 4 to create our initial sum */ sum += skb_frag_size(frag++); sum += skb_frag_size(frag++); sum += skb_frag_size(frag++); sum += skb_frag_size(frag++); sum += skb_frag_size(frag++); /* Walk through fragments adding latest fragment, testing it, and * then removing stale fragments from the sum. */ for (stale = &skb_shinfo(skb)->frags[0];; stale++) { int stale_size = skb_frag_size(stale); sum += skb_frag_size(frag++); /* The stale fragment may present us with a smaller * descriptor than the actual fragment size. To account * for that we need to remove all the data on the front and * figure out what the remainder would be in the last * descriptor associated with the fragment. */ if (stale_size > IAVF_MAX_DATA_PER_TXD) { int align_pad = -(skb_frag_off(stale)) & (IAVF_MAX_READ_REQ_SIZE - 1); sum -= align_pad; stale_size -= align_pad; do { sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED; stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED; } while (stale_size > IAVF_MAX_DATA_PER_TXD); } /* if sum is negative we failed to make sufficient progress */ if (sum < 0) return true; if (!nr_frags--) break; sum -= stale_size; } return false; } /** * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions * @tx_ring: the ring to be checked * @size: the size buffer we want to assure is available * * Returns -EBUSY if a stop is needed, else 0 **/ int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size) { netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); /* Memory barrier before checking head and tail */ smp_mb(); /* Check again in a case another CPU has just made room available. */ if (likely(IAVF_DESC_UNUSED(tx_ring) < size)) return -EBUSY; /* A reprieve! - use start_queue because it doesn't call schedule */ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; return 0; } /** * iavf_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on * @skb: send buffer * @first: first buffer info buffer to use * @tx_flags: collected send information * @hdr_len: size of the packet header * @td_cmd: the command field in the descriptor * @td_offset: offset for checksum or crc **/ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, struct iavf_tx_buffer *first, u32 tx_flags, const u8 hdr_len, u32 td_cmd, u32 td_offset) { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); skb_frag_t *frag; struct iavf_tx_buffer *tx_bi; struct iavf_tx_desc *tx_desc; u16 i = tx_ring->next_to_use; u32 td_tag = 0; dma_addr_t dma; if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) { td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1; td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >> IAVF_TX_FLAGS_VLAN_SHIFT; } first->tx_flags = tx_flags; dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); tx_desc = IAVF_TX_DESC(tx_ring, i); tx_bi = first; for (frag = &skb_shinfo(skb)->frags[0];; frag++) { unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED; if (dma_mapping_error(tx_ring->dev, dma)) goto dma_error; /* record length, and DMA address */ dma_unmap_len_set(tx_bi, len, size); dma_unmap_addr_set(tx_bi, dma, dma); /* align size to end of page */ max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1); tx_desc->buffer_addr = cpu_to_le64(dma); while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) { tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, max_data, td_tag); tx_desc++; i++; if (i == tx_ring->count) { tx_desc = IAVF_TX_DESC(tx_ring, 0); i = 0; } dma += max_data; size -= max_data; max_data 
= IAVF_MAX_DATA_PER_TXD_ALIGNED; tx_desc->buffer_addr = cpu_to_le64(dma); } if (likely(!data_len)) break; tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, td_tag); tx_desc++; i++; if (i == tx_ring->count) { tx_desc = IAVF_TX_DESC(tx_ring, 0); i = 0; } size = skb_frag_size(frag); data_len -= size; dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, DMA_TO_DEVICE); tx_bi = &tx_ring->tx_bi[i]; } netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); i++; if (i == tx_ring->count) i = 0; tx_ring->next_to_use = i; iavf_maybe_stop_tx(tx_ring, DESC_NEEDED); /* write last descriptor with RS and EOP bits */ td_cmd |= IAVF_TXD_CMD; tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, td_tag); skb_tx_timestamp(skb); /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. * * We also use this memory barrier to make certain all of the * status bits have been updated before next_to_watch is written. */ wmb(); /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; /* notify HW of packet */ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); } return; dma_error: dev_info(tx_ring->dev, "TX DMA map failed\n"); /* clear dma mappings for failed tx_bi map */ for (;;) { tx_bi = &tx_ring->tx_bi[i]; iavf_unmap_and_free_tx_resource(tx_ring, tx_bi); if (tx_bi == first) break; if (i == 0) i = tx_ring->count; i--; } tx_ring->next_to_use = i; } /** * iavf_xmit_frame_ring - Sends buffer on Tx ring * @skb: send buffer * @tx_ring: ring to send buffer on * * Returns NETDEV_TX_OK if sent, else an error code **/ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb, struct iavf_ring *tx_ring) { u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT; u32 cd_tunneling = 0, cd_l2tag2 = 0; struct iavf_tx_buffer *first; u32 td_offset = 0; u32 tx_flags = 0; __be16 protocol; u32 td_cmd = 0; u8 hdr_len = 0; int tso, count; /* prefetch the data, we'll need it later */ prefetch(skb->data); iavf_trace(xmit_frame_ring, skb, tx_ring); count = iavf_xmit_descriptor_count(skb); if (iavf_chk_linearize(skb, count)) { if (__skb_linearize(skb)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } count = iavf_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } /* need: 1 descriptor per page * PAGE_SIZE/IAVF_MAX_DATA_PER_TXD, * + 1 desc for skb_head_len/IAVF_MAX_DATA_PER_TXD, * + 4 desc gap to avoid the cache line where head is, * + 1 desc for context descriptor, * otherwise try next time */ if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) { tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; } /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_bi[tx_ring->next_to_use]; first->skb = skb; first->bytecount = skb->len; first->gso_segs = 1; /* prepare the xmit flags */ iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags); if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) { cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 << IAVF_TXD_CTX_QW1_CMD_SHIFT; cd_l2tag2 = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >> IAVF_TX_FLAGS_VLAN_SHIFT; } /* obtain protocol of skb */ protocol = vlan_get_protocol(skb); /* setup IPv4/IPv6 offloads */ if (protocol == htons(ETH_P_IP)) tx_flags |= IAVF_TX_FLAGS_IPV4; else if (protocol == htons(ETH_P_IPV6)) tx_flags |= IAVF_TX_FLAGS_IPV6; tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; else if (tso) tx_flags |= IAVF_TX_FLAGS_TSO; /* Always offload the checksum, since it's in the data descriptor 
*/ tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, tx_ring, &cd_tunneling); if (tso < 0) goto out_drop; /* always enable CRC insertion offload */ td_cmd |= IAVF_TX_DESC_CMD_ICRC; iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling, cd_l2tag2); iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset); return NETDEV_TX_OK; out_drop: iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring); dev_kfree_skb_any(first->skb); first->skb = NULL; return NETDEV_TX_OK; } /** * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer * @skb: send buffer * @netdev: network interface device structure * * Returns NETDEV_TX_OK if sent, else an error code **/ netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; /* hardware can't handle really short frames, hardware padding works * beyond this point */ if (unlikely(skb->len < IAVF_MIN_TX_LEN)) { if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len)) return NETDEV_TX_OK; skb->len = IAVF_MIN_TX_LEN; skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN); } return iavf_xmit_frame_ring(skb, tx_ring); }
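/* Editor's illustrative sketch (not part of the upstream driver): the
 * descriptor budgeting in iavf_xmit_frame_ring() above amounts to "count
 * data descriptors for the head and fragments, add a 4 descriptor gap to
 * avoid the cache line holding the head, and add 1 for the context
 * descriptor". The standalone helper below mirrors that arithmetic with
 * plain integers so the worst case can be reasoned about in isolation;
 * the 16383-byte per-descriptor cap and the helper name are assumptions
 * made for the example, not values taken from the hardware headers.
 */
static inline int example_iavf_tx_budget_ok(unsigned int head_len,
					    const unsigned int *frag_len,
					    unsigned int nr_frags,
					    unsigned int ring_unused)
{
	const unsigned int max_per_txd = 16383;	/* assumed per-descriptor cap */
	unsigned int count, i;

	/* one descriptor per max_per_txd chunk of the linear head... */
	count = (head_len + max_per_txd - 1) / max_per_txd;

	/* ...plus one per chunk of every paged fragment */
	for (i = 0; i < nr_frags; i++)
		count += (frag_len[i] + max_per_txd - 1) / max_per_txd;

	/* mirrors the iavf_maybe_stop_tx(tx_ring, count + 4 + 1) check above */
	return ring_unused >= count + 4 + 1;
}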
linux-master
drivers/net/ethernet/intel/iavf/iavf_txrx.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "iavf.h" #include "iavf_prototype.h" #include "iavf_client.h" /** * iavf_send_pf_msg * @adapter: adapter structure * @op: virtual channel opcode * @msg: pointer to message buffer * @len: message length * * Send message to PF and print status if failure. **/ static int iavf_send_pf_msg(struct iavf_adapter *adapter, enum virtchnl_ops op, u8 *msg, u16 len) { struct iavf_hw *hw = &adapter->hw; enum iavf_status status; if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) return 0; /* nothing to see here, move along */ status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); if (status) dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n", op, iavf_stat_str(hw, status), iavf_aq_str(hw, hw->aq.asq_last_status)); return iavf_status_to_errno(status); } /** * iavf_send_api_ver * @adapter: adapter structure * * Send API version admin queue message to the PF. The reply is not checked * in this function. Returns 0 if the message was successfully * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not. **/ int iavf_send_api_ver(struct iavf_adapter *adapter) { struct virtchnl_version_info vvi; vvi.major = VIRTCHNL_VERSION_MAJOR; vvi.minor = VIRTCHNL_VERSION_MINOR; return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi, sizeof(vvi)); } /** * iavf_poll_virtchnl_msg * @hw: HW configuration structure * @event: event to populate on success * @op_to_poll: requested virtchnl op to poll for * * Initialize poll for virtchnl msg matching the requested_op. Returns 0 * if a message of the correct opcode is in the queue or an error code * if no message matching the op code is waiting and other failures. */ static int iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event, enum virtchnl_ops op_to_poll) { enum virtchnl_ops received_op; enum iavf_status status; u32 v_retval; while (1) { /* When the AQ is empty, iavf_clean_arq_element will return * nonzero and this loop will terminate. */ status = iavf_clean_arq_element(hw, event, NULL); if (status != IAVF_SUCCESS) return iavf_status_to_errno(status); received_op = (enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high); if (op_to_poll == received_op) break; } v_retval = le32_to_cpu(event->desc.cookie_low); return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval); } /** * iavf_verify_api_ver * @adapter: adapter structure * * Compare API versions with the PF. Must be called after admin queue is * initialized. Returns 0 if API versions match, -EIO if they do not, * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors * from the firmware are propagated. **/ int iavf_verify_api_ver(struct iavf_adapter *adapter) { struct iavf_arq_event_info event; int err; event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL); if (!event.msg_buf) return -ENOMEM; err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION); if (!err) { struct virtchnl_version_info *pf_vvi = (struct virtchnl_version_info *)event.msg_buf; adapter->pf_version = *pf_vvi; if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR || (pf_vvi->major == VIRTCHNL_VERSION_MAJOR && pf_vvi->minor > VIRTCHNL_VERSION_MINOR)) err = -EIO; } kfree(event.msg_buf); return err; } /** * iavf_send_vf_config_msg * @adapter: adapter structure * * Send VF configuration request admin queue message to the PF. The reply * is not checked in this function. 
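* For a version 1.1 PF the request carries a bitmap of every VIRTCHNL_VF_OFFLOAD_* and VIRTCHNL_VF_CAP_* capability this driver can use, and the PF is expected to reply with the subset it grants; older PFs are sent the request with no payload.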
Returns 0 if the message was * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not. **/ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) { u32 caps; caps = VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF | VIRTCHNL_VF_OFFLOAD_RSS_AQ | VIRTCHNL_VF_OFFLOAD_RSS_REG | VIRTCHNL_VF_OFFLOAD_VLAN | VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | VIRTCHNL_VF_OFFLOAD_ENCAP | VIRTCHNL_VF_OFFLOAD_VLAN_V2 | VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | VIRTCHNL_VF_OFFLOAD_ADQ | VIRTCHNL_VF_OFFLOAD_USO | VIRTCHNL_VF_OFFLOAD_FDIR_PF | VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF | VIRTCHNL_VF_CAP_ADV_LINK_SPEED; adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG; if (PF_IS_V11(adapter)) return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, (u8 *)&caps, sizeof(caps)); else return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, NULL, 0); } int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter) { adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS; if (!VLAN_V2_ALLOWED(adapter)) return -EOPNOTSUPP; adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS; return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS, NULL, 0); } /** * iavf_validate_num_queues * @adapter: adapter structure * * Validate that the number of queues the PF has sent in * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle. **/ static void iavf_validate_num_queues(struct iavf_adapter *adapter) { if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) { struct virtchnl_vsi_resource *vsi_res; int i; dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n", adapter->vf_res->num_queue_pairs, IAVF_MAX_REQ_QUEUES); dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n", IAVF_MAX_REQ_QUEUES); adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES; for (i = 0; i < adapter->vf_res->num_vsis; i++) { vsi_res = &adapter->vf_res->vsi_res[i]; vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES; } } } /** * iavf_get_vf_config * @adapter: private adapter structure * * Get VF configuration from PF and populate hw structure. Must be called after * admin queue is initialized. Busy waits until response is received from PF, * with maximum timeout. Response from PF is returned in the buffer for further * processing by the caller. 
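* At most IAVF_VIRTCHNL_VF_RESOURCE_SIZE bytes of the response are copied into adapter->vf_res.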
**/ int iavf_get_vf_config(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; struct iavf_arq_event_info event; u16 len; int err; len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE; event.buf_len = len; event.msg_buf = kzalloc(len, GFP_KERNEL); if (!event.msg_buf) return -ENOMEM; err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES); memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); /* some PFs send more queues than we should have so validate that * we aren't getting too many queues */ if (!err) iavf_validate_num_queues(adapter); iavf_vf_parse_hw_config(hw, adapter->vf_res); kfree(event.msg_buf); return err; } int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter) { struct iavf_arq_event_info event; int err; u16 len; len = sizeof(struct virtchnl_vlan_caps); event.buf_len = len; event.msg_buf = kzalloc(len, GFP_KERNEL); if (!event.msg_buf) return -ENOMEM; err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS); if (!err) memcpy(&adapter->vlan_v2_caps, event.msg_buf, min(event.msg_len, len)); kfree(event.msg_buf); return err; } /** * iavf_configure_queues * @adapter: adapter structure * * Request that the PF set up our (previously allocated) queues. **/ void iavf_configure_queues(struct iavf_adapter *adapter) { struct virtchnl_vsi_queue_config_info *vqci; int i, max_frame = adapter->vf_res->max_mtu; int pairs = adapter->num_active_queues; struct virtchnl_queue_pair_info *vqpi; size_t len; if (max_frame > IAVF_MAX_RXBUFFER || !max_frame) max_frame = IAVF_MAX_RXBUFFER; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n", adapter->current_op); return; } adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES; len = virtchnl_struct_size(vqci, qpair, pairs); vqci = kzalloc(len, GFP_KERNEL); if (!vqci) return; /* Limit maximum frame size when jumbo frames is not enabled */ if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) && (adapter->netdev->mtu <= ETH_DATA_LEN)) max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; vqci->vsi_id = adapter->vsi_res->vsi_id; vqci->num_queue_pairs = pairs; vqpi = vqci->qpair; /* Size check is not needed here - HW max is 16 queue pairs, and we * can fit info for 31 of them into the AQ buffer before it overflows. */ for (i = 0; i < pairs; i++) { vqpi->txq.vsi_id = vqci->vsi_id; vqpi->txq.queue_id = i; vqpi->txq.ring_len = adapter->tx_rings[i].count; vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma; vqpi->rxq.vsi_id = vqci->vsi_id; vqpi->rxq.queue_id = i; vqpi->rxq.ring_len = adapter->rx_rings[i].count; vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma; vqpi->rxq.max_pkt_size = max_frame; vqpi->rxq.databuffer_size = ALIGN(adapter->rx_rings[i].rx_buf_len, BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT)); vqpi++; } adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES; iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES, (u8 *)vqci, len); kfree(vqci); } /** * iavf_enable_queues * @adapter: adapter structure * * Request that the PF enable all of our queues. 
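* For example, with four active queue pairs both the tx_queues and rx_queues bitmaps are BIT(4) - 1 = 0xf, i.e. queues 0-3.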
**/ void iavf_enable_queues(struct iavf_adapter *adapter) { struct virtchnl_queue_select vqs; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n", adapter->current_op); return; } adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES; vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES; iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES, (u8 *)&vqs, sizeof(vqs)); } /** * iavf_disable_queues * @adapter: adapter structure * * Request that the PF disable all of our queues. **/ void iavf_disable_queues(struct iavf_adapter *adapter) { struct virtchnl_queue_select vqs; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n", adapter->current_op); return; } adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES; vqs.vsi_id = adapter->vsi_res->vsi_id; vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES; iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES, (u8 *)&vqs, sizeof(vqs)); } /** * iavf_map_queues * @adapter: adapter structure * * Request that the PF map queues to interrupt vectors. Misc causes, including * admin queue, are always mapped to vector 0. **/ void iavf_map_queues(struct iavf_adapter *adapter) { struct virtchnl_irq_map_info *vimi; struct virtchnl_vector_map *vecmap; struct iavf_q_vector *q_vector; int v_idx, q_vectors; size_t len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n", adapter->current_op); return; } adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP; q_vectors = adapter->num_msix_vectors - NONQ_VECS; len = virtchnl_struct_size(vimi, vecmap, adapter->num_msix_vectors); vimi = kzalloc(len, GFP_KERNEL); if (!vimi) return; vimi->num_vectors = adapter->num_msix_vectors; /* Queue vectors first */ for (v_idx = 0; v_idx < q_vectors; v_idx++) { q_vector = &adapter->q_vectors[v_idx]; vecmap = &vimi->vecmap[v_idx]; vecmap->vsi_id = adapter->vsi_res->vsi_id; vecmap->vector_id = v_idx + NONQ_VECS; vecmap->txq_map = q_vector->ring_mask; vecmap->rxq_map = q_vector->ring_mask; vecmap->rxitr_idx = IAVF_RX_ITR; vecmap->txitr_idx = IAVF_TX_ITR; } /* Misc vector last - this is only for AdminQ messages */ vecmap = &vimi->vecmap[v_idx]; vecmap->vsi_id = adapter->vsi_res->vsi_id; vecmap->vector_id = 0; vecmap->txq_map = 0; vecmap->rxq_map = 0; adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS; iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP, (u8 *)vimi, len); kfree(vimi); } /** * iavf_set_mac_addr_type - Set the correct request type from the filter type * @virtchnl_ether_addr: pointer to requested list element * @filter: pointer to requested filter **/ static void iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr, const struct iavf_mac_filter *filter) { virtchnl_ether_addr->type = filter->is_primary ? VIRTCHNL_ETHER_ADDR_PRIMARY : VIRTCHNL_ETHER_ADDR_EXTRA; } /** * iavf_add_ether_addrs * @adapter: adapter structure * * Request that the PF add one or more addresses to our filters. 
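* If the pending additions do not fit in one IAVF_MAX_AQ_BUF_SIZE message, the list is trimmed to fit and IAVF_FLAG_AQ_ADD_MAC_FILTER is left set so the remainder is sent in a later request.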
**/ void iavf_add_ether_addrs(struct iavf_adapter *adapter) { struct virtchnl_ether_addr_list *veal; struct iavf_mac_filter *f; int i = 0, count = 0; bool more = false; size_t len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", adapter->current_op); return; } spin_lock_bh(&adapter->mac_vlan_list_lock); list_for_each_entry(f, &adapter->mac_filter_list, list) { if (f->add) count++; } if (!count) { adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR; len = virtchnl_struct_size(veal, list, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n"); while (len > IAVF_MAX_AQ_BUF_SIZE) len = virtchnl_struct_size(veal, list, --count); more = true; } veal = kzalloc(len, GFP_ATOMIC); if (!veal) { spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } veal->vsi_id = adapter->vsi_res->vsi_id; veal->num_elements = count; list_for_each_entry(f, &adapter->mac_filter_list, list) { if (f->add) { ether_addr_copy(veal->list[i].addr, f->macaddr); iavf_set_mac_addr_type(&veal->list[i], f); i++; f->add = false; if (i == count) break; } } if (!more) adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len); kfree(veal); } /** * iavf_del_ether_addrs * @adapter: adapter structure * * Request that the PF remove one or more addresses from our filters. **/ void iavf_del_ether_addrs(struct iavf_adapter *adapter) { struct virtchnl_ether_addr_list *veal; struct iavf_mac_filter *f, *ftmp; int i = 0, count = 0; bool more = false; size_t len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n", adapter->current_op); return; } spin_lock_bh(&adapter->mac_vlan_list_lock); list_for_each_entry(f, &adapter->mac_filter_list, list) { if (f->remove) count++; } if (!count) { adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR; len = virtchnl_struct_size(veal, list, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n"); while (len > IAVF_MAX_AQ_BUF_SIZE) len = virtchnl_struct_size(veal, list, --count); more = true; } veal = kzalloc(len, GFP_ATOMIC); if (!veal) { spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } veal->vsi_id = adapter->vsi_res->vsi_id; veal->num_elements = count; list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { if (f->remove) { ether_addr_copy(veal->list[i].addr, f->macaddr); iavf_set_mac_addr_type(&veal->list[i], f); i++; list_del(&f->list); kfree(f); if (i == count) break; } } if (!more) adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len); kfree(veal); } /** * iavf_mac_add_ok * @adapter: adapter structure * * Submit list of filters based on PF response. 
**/ static void iavf_mac_add_ok(struct iavf_adapter *adapter) { struct iavf_mac_filter *f, *ftmp; spin_lock_bh(&adapter->mac_vlan_list_lock); list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { f->is_new_mac = false; if (!f->add && !f->add_handled) f->add_handled = true; } spin_unlock_bh(&adapter->mac_vlan_list_lock); } /** * iavf_mac_add_reject * @adapter: adapter structure * * Remove filters from list based on PF response. **/ static void iavf_mac_add_reject(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct iavf_mac_filter *f, *ftmp; spin_lock_bh(&adapter->mac_vlan_list_lock); list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr)) f->remove = false; if (!f->add && !f->add_handled) f->add_handled = true; if (f->is_new_mac) { list_del(&f->list); kfree(f); } } spin_unlock_bh(&adapter->mac_vlan_list_lock); } /** * iavf_vlan_add_reject * @adapter: adapter structure * * Remove VLAN filters from list based on PF response. **/ static void iavf_vlan_add_reject(struct iavf_adapter *adapter) { struct iavf_vlan_filter *f, *ftmp; spin_lock_bh(&adapter->mac_vlan_list_lock); list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { if (f->state == IAVF_VLAN_IS_NEW) { list_del(&f->list); kfree(f); adapter->num_vlan_filters--; } } spin_unlock_bh(&adapter->mac_vlan_list_lock); } /** * iavf_add_vlans * @adapter: adapter structure * * Request that the PF add one or more VLAN filters to our VSI. **/ void iavf_add_vlans(struct iavf_adapter *adapter) { int len, i = 0, count = 0; struct iavf_vlan_filter *f; bool more = false; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n", adapter->current_op); return; } spin_lock_bh(&adapter->mac_vlan_list_lock); list_for_each_entry(f, &adapter->vlan_filter_list, list) { if (f->state == IAVF_VLAN_ADD) count++; } if (!count || !VLAN_FILTERING_ALLOWED(adapter)) { adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } if (VLAN_ALLOWED(adapter)) { struct virtchnl_vlan_filter_list *vvfl; adapter->current_op = VIRTCHNL_OP_ADD_VLAN; len = virtchnl_struct_size(vvfl, vlan_id, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); while (len > IAVF_MAX_AQ_BUF_SIZE) len = virtchnl_struct_size(vvfl, vlan_id, --count); more = true; } vvfl = kzalloc(len, GFP_ATOMIC); if (!vvfl) { spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } vvfl->vsi_id = adapter->vsi_res->vsi_id; vvfl->num_elements = count; list_for_each_entry(f, &adapter->vlan_filter_list, list) { if (f->state == IAVF_VLAN_ADD) { vvfl->vlan_id[i] = f->vlan.vid; i++; f->state = IAVF_VLAN_IS_NEW; if (i == count) break; } } if (!more) adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); kfree(vvfl); } else { u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters; u16 current_vlans = iavf_get_num_vlans_added(adapter); struct virtchnl_vlan_filter_list_v2 *vvfl_v2; adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2; if ((count + current_vlans) > max_vlans && current_vlans < max_vlans) { count = max_vlans - iavf_get_num_vlans_added(adapter); more = true; } len = virtchnl_struct_size(vvfl_v2, filters, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { 
dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); while (len > IAVF_MAX_AQ_BUF_SIZE) len = virtchnl_struct_size(vvfl_v2, filters, --count); more = true; } vvfl_v2 = kzalloc(len, GFP_ATOMIC); if (!vvfl_v2) { spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } vvfl_v2->vport_id = adapter->vsi_res->vsi_id; vvfl_v2->num_elements = count; list_for_each_entry(f, &adapter->vlan_filter_list, list) { if (f->state == IAVF_VLAN_ADD) { struct virtchnl_vlan_supported_caps *filtering_support = &adapter->vlan_v2_caps.filtering.filtering_support; struct virtchnl_vlan *vlan; if (i == count) break; /* give priority over outer if it's enabled */ if (filtering_support->outer) vlan = &vvfl_v2->filters[i].outer; else vlan = &vvfl_v2->filters[i].inner; vlan->tci = f->vlan.vid; vlan->tpid = f->vlan.tpid; i++; f->state = IAVF_VLAN_IS_NEW; } } if (!more) adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2, (u8 *)vvfl_v2, len); kfree(vvfl_v2); } } /** * iavf_del_vlans * @adapter: adapter structure * * Request that the PF remove one or more VLAN filters from our VSI. **/ void iavf_del_vlans(struct iavf_adapter *adapter) { struct iavf_vlan_filter *f, *ftmp; int len, i = 0, count = 0; bool more = false; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n", adapter->current_op); return; } spin_lock_bh(&adapter->mac_vlan_list_lock); list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { /* since VLAN capabilities are not allowed, we dont want to send * a VLAN delete request because it will most likely fail and * create unnecessary errors/noise, so just free the VLAN * filters marked for removal to enable bailing out before * sending a virtchnl message */ if (f->state == IAVF_VLAN_REMOVE && !VLAN_FILTERING_ALLOWED(adapter)) { list_del(&f->list); kfree(f); adapter->num_vlan_filters--; } else if (f->state == IAVF_VLAN_DISABLE && !VLAN_FILTERING_ALLOWED(adapter)) { f->state = IAVF_VLAN_INACTIVE; } else if (f->state == IAVF_VLAN_REMOVE || f->state == IAVF_VLAN_DISABLE) { count++; } } if (!count || !VLAN_FILTERING_ALLOWED(adapter)) { adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } if (VLAN_ALLOWED(adapter)) { struct virtchnl_vlan_filter_list *vvfl; adapter->current_op = VIRTCHNL_OP_DEL_VLAN; len = virtchnl_struct_size(vvfl, vlan_id, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); while (len > IAVF_MAX_AQ_BUF_SIZE) len = virtchnl_struct_size(vvfl, vlan_id, --count); more = true; } vvfl = kzalloc(len, GFP_ATOMIC); if (!vvfl) { spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } vvfl->vsi_id = adapter->vsi_res->vsi_id; vvfl->num_elements = count; list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { if (f->state == IAVF_VLAN_DISABLE) { vvfl->vlan_id[i] = f->vlan.vid; f->state = IAVF_VLAN_INACTIVE; i++; if (i == count) break; } else if (f->state == IAVF_VLAN_REMOVE) { vvfl->vlan_id[i] = f->vlan.vid; list_del(&f->list); kfree(f); adapter->num_vlan_filters--; i++; if (i == count) break; } } if (!more) adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); kfree(vvfl); } else { struct 
virtchnl_vlan_filter_list_v2 *vvfl_v2; adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2; len = virtchnl_struct_size(vvfl_v2, filters, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); while (len > IAVF_MAX_AQ_BUF_SIZE) len = virtchnl_struct_size(vvfl_v2, filters, --count); more = true; } vvfl_v2 = kzalloc(len, GFP_ATOMIC); if (!vvfl_v2) { spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } vvfl_v2->vport_id = adapter->vsi_res->vsi_id; vvfl_v2->num_elements = count; list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { if (f->state == IAVF_VLAN_DISABLE || f->state == IAVF_VLAN_REMOVE) { struct virtchnl_vlan_supported_caps *filtering_support = &adapter->vlan_v2_caps.filtering.filtering_support; struct virtchnl_vlan *vlan; /* give priority over outer if it's enabled */ if (filtering_support->outer) vlan = &vvfl_v2->filters[i].outer; else vlan = &vvfl_v2->filters[i].inner; vlan->tci = f->vlan.vid; vlan->tpid = f->vlan.tpid; if (f->state == IAVF_VLAN_DISABLE) { f->state = IAVF_VLAN_INACTIVE; } else { list_del(&f->list); kfree(f); adapter->num_vlan_filters--; } i++; if (i == count) break; } } if (!more) adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2, (u8 *)vvfl_v2, len); kfree(vvfl_v2); } } /** * iavf_set_promiscuous * @adapter: adapter structure * @flags: bitmask to control unicast/multicast promiscuous. * * Request that the PF enable promiscuous mode for our VSI. **/ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags) { struct virtchnl_promisc_info vpi; int promisc_all; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n", adapter->current_op); return; } promisc_all = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC; if ((flags & promisc_all) == promisc_all) { adapter->flags |= IAVF_FLAG_PROMISC_ON; adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC; dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); } if (flags & FLAG_VF_MULTICAST_PROMISC) { adapter->flags |= IAVF_FLAG_ALLMULTI_ON; adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI; dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n", adapter->netdev->name); } if (!flags) { if (adapter->flags & IAVF_FLAG_PROMISC_ON) { adapter->flags &= ~IAVF_FLAG_PROMISC_ON; adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC; dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); } if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) { adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON; adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI; dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n", adapter->netdev->name); } } adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; vpi.vsi_id = adapter->vsi_res->vsi_id; vpi.flags = flags; iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, (u8 *)&vpi, sizeof(vpi)); } /** * iavf_request_stats * @adapter: adapter structure * * Request VSI statistics from PF. 
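* A send failure is not treated as critical here; current_op is simply reset to VIRTCHNL_OP_UNKNOWN so other commands are not blocked.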
**/ void iavf_request_stats(struct iavf_adapter *adapter) { struct virtchnl_queue_select vqs; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* no error message, this isn't crucial */ return; } adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS; adapter->current_op = VIRTCHNL_OP_GET_STATS; vqs.vsi_id = adapter->vsi_res->vsi_id; /* queue maps are ignored for this message - only the vsi is used */ if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs, sizeof(vqs))) /* if the request failed, don't lock out others */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; } /** * iavf_get_hena * @adapter: adapter structure * * Request hash enable capabilities from PF **/ void iavf_get_hena(struct iavf_adapter *adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n", adapter->current_op); return; } adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS; adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA; iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0); } /** * iavf_set_hena * @adapter: adapter structure * * Request the PF to set our RSS hash capabilities **/ void iavf_set_hena(struct iavf_adapter *adapter) { struct virtchnl_rss_hena vrh; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n", adapter->current_op); return; } vrh.hena = adapter->hena; adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA; adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA; iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh, sizeof(vrh)); } /** * iavf_set_rss_key * @adapter: adapter structure * * Request the PF to set our RSS hash key **/ void iavf_set_rss_key(struct iavf_adapter *adapter) { struct virtchnl_rss_key *vrk; int len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n", adapter->current_op); return; } len = virtchnl_struct_size(vrk, key, adapter->rss_key_size); vrk = kzalloc(len, GFP_KERNEL); if (!vrk) return; vrk->vsi_id = adapter->vsi.id; vrk->key_len = adapter->rss_key_size; memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size); adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY; adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY; iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len); kfree(vrk); } /** * iavf_set_rss_lut * @adapter: adapter structure * * Request the PF to set our RSS lookup table **/ void iavf_set_rss_lut(struct iavf_adapter *adapter) { struct virtchnl_rss_lut *vrl; int len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n", adapter->current_op); return; } len = virtchnl_struct_size(vrl, lut, adapter->rss_lut_size); vrl = kzalloc(len, GFP_KERNEL); if (!vrl) return; vrl->vsi_id = adapter->vsi.id; vrl->lut_entries = adapter->rss_lut_size; memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size); adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT; adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT; iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len); kfree(vrl); } /** * iavf_enable_vlan_stripping * @adapter: adapter structure * * Request VLAN header stripping to be enabled **/ void iavf_enable_vlan_stripping(struct iavf_adapter 
*adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n", adapter->current_op); return; } adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING; adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0); } /** * iavf_disable_vlan_stripping * @adapter: adapter structure * * Request VLAN header stripping to be disabled **/ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n", adapter->current_op); return; } adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING; adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0); } /** * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.) */ static u32 iavf_tpid_to_vc_ethertype(u16 tpid) { switch (tpid) { case ETH_P_8021Q: return VIRTCHNL_VLAN_ETHERTYPE_8100; case ETH_P_8021AD: return VIRTCHNL_VLAN_ETHERTYPE_88A8; } return 0; } /** * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message * @adapter: adapter structure * @msg: message structure used for updating offloads over virtchnl to update * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.) * @offload_op: opcode used to determine which support structure to check */ static int iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter, struct virtchnl_vlan_setting *msg, u16 tpid, enum virtchnl_ops offload_op) { struct virtchnl_vlan_supported_caps *offload_support; u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid); /* reference the correct offload support structure */ switch (offload_op) { case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: offload_support = &adapter->vlan_v2_caps.offloads.stripping_support; break; case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: offload_support = &adapter->vlan_v2_caps.offloads.insertion_support; break; default: dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n", offload_op); return -EINVAL; } /* make sure ethertype is supported */ if (offload_support->outer & vc_ethertype && offload_support->outer & VIRTCHNL_VLAN_TOGGLE) { msg->outer_ethertype_setting = vc_ethertype; } else if (offload_support->inner & vc_ethertype && offload_support->inner & VIRTCHNL_VLAN_TOGGLE) { msg->inner_ethertype_setting = vc_ethertype; } else { dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n", offload_op, tpid); return -EINVAL; } return 0; } /** * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request * @adapter: adapter structure * @tpid: VLAN TPID * @offload_op: opcode used to determine which AQ required bit to clear */ static void iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid, enum virtchnl_ops offload_op) { switch (offload_op) { case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: if (tpid == ETH_P_8021Q) adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING; else if (tpid == ETH_P_8021AD) adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING; break; case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: if (tpid == 
ETH_P_8021Q) adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING; else if (tpid == ETH_P_8021AD) adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING; break; case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: if (tpid == ETH_P_8021Q) adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION; else if (tpid == ETH_P_8021AD) adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION; break; case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: if (tpid == ETH_P_8021Q) adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION; else if (tpid == ETH_P_8021AD) adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION; break; default: dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n", offload_op); } } /** * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl * @adapter: adapter structure * @tpid: VLAN TPID used for the command (i.e. 0x8100 or 0x88a8) * @offload_op: offload_op used to make the request over virtchnl */ static void iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid, enum virtchnl_ops offload_op) { struct virtchnl_vlan_setting *msg; int len = sizeof(*msg); if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n", offload_op, adapter->current_op); return; } adapter->current_op = offload_op; msg = kzalloc(len, GFP_KERNEL); if (!msg) return; msg->vport_id = adapter->vsi_res->vsi_id; /* always clear to prevent unsupported and endless requests */ iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op); /* only send valid offload requests */ if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op)) iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len); else adapter->current_op = VIRTCHNL_OP_UNKNOWN; kfree(msg); } /** * iavf_enable_vlan_stripping_v2 - enable VLAN stripping * @adapter: adapter structure * @tpid: VLAN TPID used to enable VLAN stripping */ void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) { iavf_send_vlan_offload_v2(adapter, tpid, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2); } /** * iavf_disable_vlan_stripping_v2 - disable VLAN stripping * @adapter: adapter structure * @tpid: VLAN TPID used to disable VLAN stripping */ void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) { iavf_send_vlan_offload_v2(adapter, tpid, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2); } /** * iavf_enable_vlan_insertion_v2 - enable VLAN insertion * @adapter: adapter structure * @tpid: VLAN TPID used to enable VLAN insertion */ void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) { iavf_send_vlan_offload_v2(adapter, tpid, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2); } /** * iavf_disable_vlan_insertion_v2 - disable VLAN insertion * @adapter: adapter structure * @tpid: VLAN TPID used to disable VLAN insertion */ void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) { iavf_send_vlan_offload_v2(adapter, tpid, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2); } #define IAVF_MAX_SPEED_STRLEN 13 /** * iavf_print_link_message - print link up or down * @adapter: adapter structure * * Log a message telling the world of our wonderous link status */ static void iavf_print_link_message(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; int link_speed_mbps; char *speed; if (!adapter->link_up) { netdev_info(netdev, "NIC Link is Down\n"); return; 
} speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL); if (!speed) return; if (ADV_LINK_SUPPORT(adapter)) { link_speed_mbps = adapter->link_speed_mbps; goto print_link_msg; } switch (adapter->link_speed) { case VIRTCHNL_LINK_SPEED_40GB: link_speed_mbps = SPEED_40000; break; case VIRTCHNL_LINK_SPEED_25GB: link_speed_mbps = SPEED_25000; break; case VIRTCHNL_LINK_SPEED_20GB: link_speed_mbps = SPEED_20000; break; case VIRTCHNL_LINK_SPEED_10GB: link_speed_mbps = SPEED_10000; break; case VIRTCHNL_LINK_SPEED_5GB: link_speed_mbps = SPEED_5000; break; case VIRTCHNL_LINK_SPEED_2_5GB: link_speed_mbps = SPEED_2500; break; case VIRTCHNL_LINK_SPEED_1GB: link_speed_mbps = SPEED_1000; break; case VIRTCHNL_LINK_SPEED_100MB: link_speed_mbps = SPEED_100; break; default: link_speed_mbps = SPEED_UNKNOWN; break; } print_link_msg: if (link_speed_mbps > SPEED_1000) { if (link_speed_mbps == SPEED_2500) snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps"); else /* convert to Gbps inline */ snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", link_speed_mbps / 1000, "Gbps"); } else if (link_speed_mbps == SPEED_UNKNOWN) { snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps"); } else { snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", link_speed_mbps, "Mbps"); } netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed); kfree(speed); } /** * iavf_get_vpe_link_status * @adapter: adapter structure * @vpe: virtchnl_pf_event structure * * Helper function for determining the link status **/ static bool iavf_get_vpe_link_status(struct iavf_adapter *adapter, struct virtchnl_pf_event *vpe) { if (ADV_LINK_SUPPORT(adapter)) return vpe->event_data.link_event_adv.link_status; else return vpe->event_data.link_event.link_status; } /** * iavf_set_adapter_link_speed_from_vpe * @adapter: adapter structure for which we are setting the link speed * @vpe: virtchnl_pf_event structure that contains the link speed we are setting * * Helper function for setting iavf_adapter link speed **/ static void iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter, struct virtchnl_pf_event *vpe) { if (ADV_LINK_SUPPORT(adapter)) adapter->link_speed_mbps = vpe->event_data.link_event_adv.link_speed; else adapter->link_speed = vpe->event_data.link_event.link_speed; } /** * iavf_enable_channels * @adapter: adapter structure * * Request that the PF enable channels as specified by * the user via tc tool. 
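* One list entry is sent per traffic class, carrying the queue count, queue offset and max Tx rate configured for that TC.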
**/ void iavf_enable_channels(struct iavf_adapter *adapter) { struct virtchnl_tc_info *vti = NULL; size_t len; int i; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n", adapter->current_op); return; } len = virtchnl_struct_size(vti, list, adapter->num_tc); vti = kzalloc(len, GFP_KERNEL); if (!vti) return; vti->num_tc = adapter->num_tc; for (i = 0; i < vti->num_tc; i++) { vti->list[i].count = adapter->ch_config.ch_info[i].count; vti->list[i].offset = adapter->ch_config.ch_info[i].offset; vti->list[i].pad = 0; vti->list[i].max_tx_rate = adapter->ch_config.ch_info[i].max_tx_rate; } adapter->ch_config.state = __IAVF_TC_RUNNING; adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS; adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS; iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len); kfree(vti); } /** * iavf_disable_channels * @adapter: adapter structure * * Request that the PF disable channels that are configured **/ void iavf_disable_channels(struct iavf_adapter *adapter) { if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n", adapter->current_op); return; } adapter->ch_config.state = __IAVF_TC_INVALID; adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS; adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS; iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0); } /** * iavf_print_cloud_filter * @adapter: adapter structure * @f: cloud filter to print * * Print the cloud filter **/ static void iavf_print_cloud_filter(struct iavf_adapter *adapter, struct virtchnl_filter *f) { switch (f->flow_type) { case VIRTCHNL_TCP_V4_FLOW: dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n", &f->data.tcp_spec.dst_mac, &f->data.tcp_spec.src_mac, ntohs(f->data.tcp_spec.vlan_id), &f->data.tcp_spec.dst_ip[0], &f->data.tcp_spec.src_ip[0], ntohs(f->data.tcp_spec.dst_port), ntohs(f->data.tcp_spec.src_port)); break; case VIRTCHNL_TCP_V6_FLOW: dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n", &f->data.tcp_spec.dst_mac, &f->data.tcp_spec.src_mac, ntohs(f->data.tcp_spec.vlan_id), &f->data.tcp_spec.dst_ip, &f->data.tcp_spec.src_ip, ntohs(f->data.tcp_spec.dst_port), ntohs(f->data.tcp_spec.src_port)); break; } } /** * iavf_add_cloud_filter * @adapter: adapter structure * * Request that the PF add cloud filters as specified * by the user via tc tool. 
**/ void iavf_add_cloud_filter(struct iavf_adapter *adapter) { struct iavf_cloud_filter *cf; struct virtchnl_filter *f; int len = 0, count = 0; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n", adapter->current_op); return; } list_for_each_entry(cf, &adapter->cloud_filter_list, list) { if (cf->add) { count++; break; } } if (!count) { adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER; return; } adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER; len = sizeof(struct virtchnl_filter); f = kzalloc(len, GFP_KERNEL); if (!f) return; list_for_each_entry(cf, &adapter->cloud_filter_list, list) { if (cf->add) { memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); cf->add = false; cf->state = __IAVF_CF_ADD_PENDING; iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER, (u8 *)f, len); } } kfree(f); } /** * iavf_del_cloud_filter * @adapter: adapter structure * * Request that the PF delete cloud filters as specified * by the user via tc tool. **/ void iavf_del_cloud_filter(struct iavf_adapter *adapter) { struct iavf_cloud_filter *cf, *cftmp; struct virtchnl_filter *f; int len = 0, count = 0; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n", adapter->current_op); return; } list_for_each_entry(cf, &adapter->cloud_filter_list, list) { if (cf->del) { count++; break; } } if (!count) { adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER; return; } adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER; len = sizeof(struct virtchnl_filter); f = kzalloc(len, GFP_KERNEL); if (!f) return; list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { if (cf->del) { memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); cf->del = false; cf->state = __IAVF_CF_DEL_PENDING; iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER, (u8 *)f, len); } } kfree(f); } /** * iavf_add_fdir_filter * @adapter: the VF adapter structure * * Request that the PF add Flow Director filters as specified * by the user via ethtool. **/ void iavf_add_fdir_filter(struct iavf_adapter *adapter) { struct iavf_fdir_fltr *fdir; struct virtchnl_fdir_add *f; bool process_fltr = false; int len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n", adapter->current_op); return; } len = sizeof(struct virtchnl_fdir_add); f = kzalloc(len, GFP_KERNEL); if (!f) return; spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry(fdir, &adapter->fdir_list_head, list) { if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) { process_fltr = true; fdir->state = IAVF_FDIR_FLTR_ADD_PENDING; memcpy(f, &fdir->vc_add_msg, len); break; } } spin_unlock_bh(&adapter->fdir_fltr_lock); if (!process_fltr) { /* prevent iavf_add_fdir_filter() from being called when there * are no filters to add */ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER; kfree(f); return; } adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER; iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len); kfree(f); } /** * iavf_del_fdir_filter * @adapter: the VF adapter structure * * Request that the PF delete Flow Director filters as specified * by the user via ethtool. 
**/ void iavf_del_fdir_filter(struct iavf_adapter *adapter) { struct iavf_fdir_fltr *fdir; struct virtchnl_fdir_del f; bool process_fltr = false; int len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n", adapter->current_op); return; } len = sizeof(struct virtchnl_fdir_del); spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry(fdir, &adapter->fdir_list_head, list) { if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) { process_fltr = true; memset(&f, 0, len); f.vsi_id = fdir->vc_add_msg.vsi_id; f.flow_id = fdir->flow_id; fdir->state = IAVF_FDIR_FLTR_DEL_PENDING; break; } } spin_unlock_bh(&adapter->fdir_fltr_lock); if (!process_fltr) { adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER; return; } adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER; iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len); } /** * iavf_add_adv_rss_cfg * @adapter: the VF adapter structure * * Request that the PF add RSS configuration as specified * by the user via ethtool. **/ void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter) { struct virtchnl_rss_cfg *rss_cfg; struct iavf_adv_rss *rss; bool process_rss = false; int len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n", adapter->current_op); return; } len = sizeof(struct virtchnl_rss_cfg); rss_cfg = kzalloc(len, GFP_KERNEL); if (!rss_cfg) return; spin_lock_bh(&adapter->adv_rss_lock); list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) { process_rss = true; rss->state = IAVF_ADV_RSS_ADD_PENDING; memcpy(rss_cfg, &rss->cfg_msg, len); iavf_print_adv_rss_cfg(adapter, rss, "Input set change for", "is pending"); break; } } spin_unlock_bh(&adapter->adv_rss_lock); if (process_rss) { adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG; iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG, (u8 *)rss_cfg, len); } else { adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; } kfree(rss_cfg); } /** * iavf_del_adv_rss_cfg * @adapter: the VF adapter structure * * Request that the PF delete RSS configuration as specified * by the user via ethtool. **/ void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter) { struct virtchnl_rss_cfg *rss_cfg; struct iavf_adv_rss *rss; bool process_rss = false; int len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n", adapter->current_op); return; } len = sizeof(struct virtchnl_rss_cfg); rss_cfg = kzalloc(len, GFP_KERNEL); if (!rss_cfg) return; spin_lock_bh(&adapter->adv_rss_lock); list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) { process_rss = true; rss->state = IAVF_ADV_RSS_DEL_PENDING; memcpy(rss_cfg, &rss->cfg_msg, len); break; } } spin_unlock_bh(&adapter->adv_rss_lock); if (process_rss) { adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG; iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG, (u8 *)rss_cfg, len); } else { adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; } kfree(rss_cfg); } /** * iavf_request_reset * @adapter: adapter structure * * Request that the PF reset this VF. No response is expected. 
**/ int iavf_request_reset(struct iavf_adapter *adapter) { int err; /* Don't check CURRENT_OP - this is always higher priority */ err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); adapter->current_op = VIRTCHNL_OP_UNKNOWN; return err; } /** * iavf_netdev_features_vlan_strip_set - update vlan strip status * @netdev: ptr to netdev being adjusted * @enable: enable or disable vlan strip * * Helper function to change vlan strip status in netdev->features. */ static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev, const bool enable) { if (enable) netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; else netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; } /** * iavf_virtchnl_completion * @adapter: adapter structure * @v_opcode: opcode sent by PF * @v_retval: retval sent by PF * @msg: message sent by PF * @msglen: message length * * Asynchronous completion function for admin queue messages. Rather than busy * wait, we fire off our requests and assume that no errors will be returned. * This function handles the reply messages. **/ void iavf_virtchnl_completion(struct iavf_adapter *adapter, enum virtchnl_ops v_opcode, enum iavf_status v_retval, u8 *msg, u16 msglen) { struct net_device *netdev = adapter->netdev; if (v_opcode == VIRTCHNL_OP_EVENT) { struct virtchnl_pf_event *vpe = (struct virtchnl_pf_event *)msg; bool link_up = iavf_get_vpe_link_status(adapter, vpe); switch (vpe->event) { case VIRTCHNL_EVENT_LINK_CHANGE: iavf_set_adapter_link_speed_from_vpe(adapter, vpe); /* we've already got the right link status, bail */ if (adapter->link_up == link_up) break; if (link_up) { /* If we get link up message and start queues * before our queues are configured it will * trigger a TX hang. In that case, just ignore * the link status message,we'll get another one * after we enable queues and actually prepared * to send traffic. */ if (adapter->state != __IAVF_RUNNING) break; /* For ADq enabled VF, we reconfigure VSIs and * re-allocate queues. Hence wait till all * queues are enabled. 
*/ if (adapter->flags & IAVF_FLAG_QUEUES_DISABLED) break; } adapter->link_up = link_up; if (link_up) { netif_tx_start_all_queues(netdev); netif_carrier_on(netdev); } else { netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); } iavf_print_link_message(adapter); break; case VIRTCHNL_EVENT_RESET_IMPENDING: dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING); } break; default: dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", vpe->event); break; } return; } if (v_retval) { switch (v_opcode) { case VIRTCHNL_OP_ADD_VLAN: dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); break; case VIRTCHNL_OP_ADD_ETH_ADDR: dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); iavf_mac_add_reject(adapter); /* restore administratively set MAC address */ ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); wake_up(&adapter->vc_waitqueue); break; case VIRTCHNL_OP_DEL_VLAN: dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); break; case VIRTCHNL_OP_DEL_ETH_ADDR: dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); break; case VIRTCHNL_OP_ENABLE_CHANNELS: dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; adapter->ch_config.state = __IAVF_TC_INVALID; netdev_reset_tc(netdev); netif_tx_start_all_queues(netdev); break; case VIRTCHNL_OP_DISABLE_CHANNELS: dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; adapter->ch_config.state = __IAVF_TC_RUNNING; netif_tx_start_all_queues(netdev); break; case VIRTCHNL_OP_ADD_CLOUD_FILTER: { struct iavf_cloud_filter *cf, *cftmp; list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { if (cf->state == __IAVF_CF_ADD_PENDING) { cf->state = __IAVF_CF_INVALID; dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); iavf_print_cloud_filter(adapter, &cf->f); list_del(&cf->list); kfree(cf); adapter->num_cloud_filters--; } } } break; case VIRTCHNL_OP_DEL_CLOUD_FILTER: { struct iavf_cloud_filter *cf; list_for_each_entry(cf, &adapter->cloud_filter_list, list) { if (cf->state == __IAVF_CF_DEL_PENDING) { cf->state = __IAVF_CF_ACTIVE; dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); iavf_print_cloud_filter(adapter, &cf->f); } } } break; case VIRTCHNL_OP_ADD_FDIR_FILTER: { struct iavf_fdir_fltr *fdir, *fdir_tmp; spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, list) { if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); iavf_print_fdir_fltr(adapter, fdir); if (msglen) dev_err(&adapter->pdev->dev, "%s\n", msg); list_del(&fdir->list); kfree(fdir); adapter->fdir_active_fltr--; } } spin_unlock_bh(&adapter->fdir_fltr_lock); } break; case VIRTCHNL_OP_DEL_FDIR_FILTER: { struct iavf_fdir_fltr *fdir; spin_lock_bh(&adapter->fdir_fltr_lock); 
list_for_each_entry(fdir, &adapter->fdir_list_head, list) { if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { fdir->state = IAVF_FDIR_FLTR_ACTIVE; dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); iavf_print_fdir_fltr(adapter, fdir); } } spin_unlock_bh(&adapter->fdir_fltr_lock); } break; case VIRTCHNL_OP_ADD_RSS_CFG: { struct iavf_adv_rss *rss, *rss_tmp; spin_lock_bh(&adapter->adv_rss_lock); list_for_each_entry_safe(rss, rss_tmp, &adapter->adv_rss_list_head, list) { if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { iavf_print_adv_rss_cfg(adapter, rss, "Failed to change the input set for", NULL); list_del(&rss->list); kfree(rss); } } spin_unlock_bh(&adapter->adv_rss_lock); } break; case VIRTCHNL_OP_DEL_RSS_CFG: { struct iavf_adv_rss *rss; spin_lock_bh(&adapter->adv_rss_lock); list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { rss->state = IAVF_ADV_RSS_ACTIVE; dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); } } spin_unlock_bh(&adapter->adv_rss_lock); } break; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); /* Vlan stripping could not be enabled by ethtool. * Disable it in netdev->features. */ iavf_netdev_features_vlan_strip_set(netdev, false); break; case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n"); /* Vlan stripping could not be disabled by ethtool. * Enable it in netdev->features. */ iavf_netdev_features_vlan_strip_set(netdev, true); break; case VIRTCHNL_OP_ADD_VLAN_V2: iavf_vlan_add_reject(adapter); dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); break; default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", v_retval, iavf_stat_str(&adapter->hw, v_retval), v_opcode); } } switch (v_opcode) { case VIRTCHNL_OP_ADD_ETH_ADDR: if (!v_retval) iavf_mac_add_ok(adapter); if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) { netif_addr_lock_bh(netdev); eth_hw_addr_set(netdev, adapter->hw.mac.addr); netif_addr_unlock_bh(netdev); } wake_up(&adapter->vc_waitqueue); break; case VIRTCHNL_OP_GET_STATS: { struct iavf_eth_stats *stats = (struct iavf_eth_stats *)msg; netdev->stats.rx_packets = stats->rx_unicast + stats->rx_multicast + stats->rx_broadcast; netdev->stats.tx_packets = stats->tx_unicast + stats->tx_multicast + stats->tx_broadcast; netdev->stats.rx_bytes = stats->rx_bytes; netdev->stats.tx_bytes = stats->tx_bytes; netdev->stats.tx_errors = stats->tx_errors; netdev->stats.rx_dropped = stats->rx_discards; netdev->stats.tx_dropped = stats->tx_discards; adapter->current_stats = *stats; } break; case VIRTCHNL_OP_GET_VF_RESOURCES: { u16 len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE; memcpy(adapter->vf_res, msg, min(msglen, len)); iavf_validate_num_queues(adapter); iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res); if (is_zero_ether_addr(adapter->hw.mac.addr)) { /* restore current mac address */ ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); } else { netif_addr_lock_bh(netdev); /* refresh current mac address if changed */ ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); netif_addr_unlock_bh(netdev); } spin_lock_bh(&adapter->mac_vlan_list_lock);
iavf_add_filter(adapter, adapter->hw.mac.addr); if (VLAN_ALLOWED(adapter)) { if (!list_empty(&adapter->vlan_filter_list)) { struct iavf_vlan_filter *vlf; /* re-add all VLAN filters over virtchnl */ list_for_each_entry(vlf, &adapter->vlan_filter_list, list) vlf->state = IAVF_VLAN_ADD; adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; } } spin_unlock_bh(&adapter->mac_vlan_list_lock); iavf_parse_vf_resource_msg(adapter); /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish * configuration */ if (VLAN_V2_ALLOWED(adapter)) break; /* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2 * wasn't successfully negotiated with the PF */ } fallthrough; case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: { struct iavf_mac_filter *f; bool was_mac_changed; u64 aq_required = 0; if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS) memcpy(&adapter->vlan_v2_caps, msg, min_t(u16, msglen, sizeof(adapter->vlan_v2_caps))); iavf_process_config(adapter); adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; iavf_schedule_finish_config(adapter); iavf_set_queue_vlan_tag_loc(adapter); was_mac_changed = !ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr); spin_lock_bh(&adapter->mac_vlan_list_lock); /* re-add all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { if (was_mac_changed && ether_addr_equal(netdev->dev_addr, f->macaddr)) ether_addr_copy(f->macaddr, adapter->hw.mac.addr); f->is_new_mac = true; f->add = true; f->add_handled = false; f->remove = false; } /* re-add all VLAN filters */ if (VLAN_FILTERING_ALLOWED(adapter)) { struct iavf_vlan_filter *vlf; if (!list_empty(&adapter->vlan_filter_list)) { list_for_each_entry(vlf, &adapter->vlan_filter_list, list) vlf->state = IAVF_VLAN_ADD; aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; } } spin_unlock_bh(&adapter->mac_vlan_list_lock); netif_addr_lock_bh(netdev); eth_hw_addr_set(netdev, adapter->hw.mac.addr); netif_addr_unlock_bh(netdev); adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER | aq_required; } break; case VIRTCHNL_OP_ENABLE_QUEUES: /* enable transmits */ iavf_irq_enable(adapter, true); wake_up(&adapter->reset_waitqueue); adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED; break; case VIRTCHNL_OP_DISABLE_QUEUES: iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); if (adapter->state == __IAVF_DOWN_PENDING) { iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); } break; case VIRTCHNL_OP_VERSION: case VIRTCHNL_OP_CONFIG_IRQ_MAP: /* Don't display an error if we get these out of sequence. * If the firmware needed to get kicked, we'll get these and * it's no problem. */ if (v_opcode != adapter->current_op) return; break; case VIRTCHNL_OP_RDMA: /* Gobble zero-length replies from the PF. They indicate that * a previous message was received OK, and the client doesn't * care about that. 
*/ if (msglen && CLIENT_ENABLED(adapter)) iavf_notify_client_message(&adapter->vsi, msg, msglen); break; case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: adapter->client_pending &= ~(BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP)); break; case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; if (msglen == sizeof(*vrh)) adapter->hena = vrh->hena; else dev_warn(&adapter->pdev->dev, "Invalid message %d from PF\n", v_opcode); } break; case VIRTCHNL_OP_REQUEST_QUEUES: { struct virtchnl_vf_res_request *vfres = (struct virtchnl_vf_res_request *)msg; if (vfres->num_queue_pairs != adapter->num_req_queues) { dev_info(&adapter->pdev->dev, "Requested %d queues, PF can support %d\n", adapter->num_req_queues, vfres->num_queue_pairs); adapter->num_req_queues = 0; adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; } } break; case VIRTCHNL_OP_ADD_CLOUD_FILTER: { struct iavf_cloud_filter *cf; list_for_each_entry(cf, &adapter->cloud_filter_list, list) { if (cf->state == __IAVF_CF_ADD_PENDING) cf->state = __IAVF_CF_ACTIVE; } } break; case VIRTCHNL_OP_DEL_CLOUD_FILTER: { struct iavf_cloud_filter *cf, *cftmp; list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { if (cf->state == __IAVF_CF_DEL_PENDING) { cf->state = __IAVF_CF_INVALID; list_del(&cf->list); kfree(cf); adapter->num_cloud_filters--; } } } break; case VIRTCHNL_OP_ADD_FDIR_FILTER: { struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg; struct iavf_fdir_fltr *fdir, *fdir_tmp; spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, list) { if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) { if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) { dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n", fdir->loc); fdir->state = IAVF_FDIR_FLTR_ACTIVE; fdir->flow_id = add_fltr->flow_id; } else { dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n", add_fltr->status); iavf_print_fdir_fltr(adapter, fdir); list_del(&fdir->list); kfree(fdir); adapter->fdir_active_fltr--; } } } spin_unlock_bh(&adapter->fdir_fltr_lock); } break; case VIRTCHNL_OP_DEL_FDIR_FILTER: { struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg; struct iavf_fdir_fltr *fdir, *fdir_tmp; spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, list) { if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) { dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", fdir->loc); list_del(&fdir->list); kfree(fdir); adapter->fdir_active_fltr--; } else { fdir->state = IAVF_FDIR_FLTR_ACTIVE; dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n", del_fltr->status); iavf_print_fdir_fltr(adapter, fdir); } } } spin_unlock_bh(&adapter->fdir_fltr_lock); } break; case VIRTCHNL_OP_ADD_RSS_CFG: { struct iavf_adv_rss *rss; spin_lock_bh(&adapter->adv_rss_lock); list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { if (rss->state == IAVF_ADV_RSS_ADD_PENDING) { iavf_print_adv_rss_cfg(adapter, rss, "Input set change for", "successful"); rss->state = IAVF_ADV_RSS_ACTIVE; } } spin_unlock_bh(&adapter->adv_rss_lock); } break; case VIRTCHNL_OP_DEL_RSS_CFG: { struct iavf_adv_rss *rss, *rss_tmp; spin_lock_bh(&adapter->adv_rss_lock); list_for_each_entry_safe(rss, rss_tmp, &adapter->adv_rss_list_head, list) { if (rss->state == IAVF_ADV_RSS_DEL_PENDING) { list_del(&rss->list); kfree(rss); } } 
spin_unlock_bh(&adapter->adv_rss_lock); } break; case VIRTCHNL_OP_ADD_VLAN_V2: { struct iavf_vlan_filter *f; spin_lock_bh(&adapter->mac_vlan_list_lock); list_for_each_entry(f, &adapter->vlan_filter_list, list) { if (f->state == IAVF_VLAN_IS_NEW) f->state = IAVF_VLAN_ACTIVE; } spin_unlock_bh(&adapter->mac_vlan_list_lock); } break; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: /* PF enabled vlan strip on this VF. * Update netdev->features if needed to be in sync with ethtool. */ if (!v_retval) iavf_netdev_features_vlan_strip_set(netdev, true); break; case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: /* PF disabled vlan strip on this VF. * Update netdev->features if needed to be in sync with ethtool. */ if (!v_retval) iavf_netdev_features_vlan_strip_set(netdev, false); break; default: if (adapter->current_op && (v_opcode != adapter->current_op)) dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", adapter->current_op, v_opcode); break; } /* switch v_opcode */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; }
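/* Illustrative sketch (not part of the upstream file): every request helper
 * in this file follows the same single-outstanding-command pattern -- bail if
 * a virtchnl operation is already pending, record the new opcode in
 * adapter->current_op, send the message, and rely on
 * iavf_virtchnl_completion() to reset current_op to VIRTCHNL_OP_UNKNOWN once
 * the PF replies. The helper name below is hypothetical and exists only to
 * show that shape.
 */
static void __maybe_unused iavf_send_simple_op_sketch(struct iavf_adapter *adapter,
						       enum virtchnl_ops op,
						       u8 *msg, u16 len)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* one request at a time: an earlier command is still pending */
		dev_err(&adapter->pdev->dev,
			"Cannot send %d, command %d pending\n",
			op, adapter->current_op);
		return;
	}

	adapter->current_op = op;
	iavf_send_pf_msg(adapter, op, msg, len);
	/* current_op is cleared in iavf_virtchnl_completion() */
}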
linux-master
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "iavf_status.h" #include "iavf_type.h" #include "iavf_register.h" #include "iavf_adminq.h" #include "iavf_prototype.h" /** * iavf_adminq_init_regs - Initialize AdminQ registers * @hw: pointer to the hardware structure * * This assumes the alloc_asq and alloc_arq functions have already been called **/ static void iavf_adminq_init_regs(struct iavf_hw *hw) { /* set head and tail registers in our local struct */ hw->aq.asq.tail = IAVF_VF_ATQT1; hw->aq.asq.head = IAVF_VF_ATQH1; hw->aq.asq.len = IAVF_VF_ATQLEN1; hw->aq.asq.bal = IAVF_VF_ATQBAL1; hw->aq.asq.bah = IAVF_VF_ATQBAH1; hw->aq.arq.tail = IAVF_VF_ARQT1; hw->aq.arq.head = IAVF_VF_ARQH1; hw->aq.arq.len = IAVF_VF_ARQLEN1; hw->aq.arq.bal = IAVF_VF_ARQBAL1; hw->aq.arq.bah = IAVF_VF_ARQBAH1; } /** * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings * @hw: pointer to the hardware structure **/ static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw) { enum iavf_status ret_code; ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, iavf_mem_atq_ring, (hw->aq.num_asq_entries * sizeof(struct iavf_aq_desc)), IAVF_ADMINQ_DESC_ALIGNMENT); if (ret_code) return ret_code; ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, (hw->aq.num_asq_entries * sizeof(struct iavf_asq_cmd_details))); if (ret_code) { iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf); return ret_code; } return ret_code; } /** * iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings * @hw: pointer to the hardware structure **/ static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw) { enum iavf_status ret_code; ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, iavf_mem_arq_ring, (hw->aq.num_arq_entries * sizeof(struct iavf_aq_desc)), IAVF_ADMINQ_DESC_ALIGNMENT); return ret_code; } /** * iavf_free_adminq_asq - Free Admin Queue send rings * @hw: pointer to the hardware structure * * This assumes the posted send buffers have already been cleaned * and de-allocated **/ static void iavf_free_adminq_asq(struct iavf_hw *hw) { iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf); } /** * iavf_free_adminq_arq - Free Admin Queue receive rings * @hw: pointer to the hardware structure * * This assumes the posted receive buffers have already been cleaned * and de-allocated **/ static void iavf_free_adminq_arq(struct iavf_hw *hw) { iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf); } /** * iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue * @hw: pointer to the hardware structure **/ static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw) { struct iavf_aq_desc *desc; struct iavf_dma_mem *bi; enum iavf_status ret_code; int i; /* We'll be allocating the buffer info memory first, then we can * allocate the mapped buffers for the event processing */ /* buffer_info structures do not need alignment */ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head, (hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem))); if (ret_code) goto alloc_arq_bufs; hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va; /* allocate the mapped buffers */ for (i = 0; i < hw->aq.num_arq_entries; i++) { bi = &hw->aq.arq.r.arq_bi[i]; ret_code = iavf_allocate_dma_mem(hw, bi, iavf_mem_arq_buf, hw->aq.arq_buf_size, IAVF_ADMINQ_DESC_ALIGNMENT); if (ret_code) goto unwind_alloc_arq_bufs; /* now configure the descriptors for use */ desc = IAVF_ADMINQ_DESC(hw->aq.arq, i); desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF); if (hw->aq.arq_buf_size > 
IAVF_AQ_LARGE_BUF) desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB); desc->opcode = 0; /* This is in accordance with Admin queue design, there is no * register for buffer size configuration */ desc->datalen = cpu_to_le16((u16)bi->size); desc->retval = 0; desc->cookie_high = 0; desc->cookie_low = 0; desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa)); desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); desc->params.external.param0 = 0; desc->params.external.param1 = 0; } alloc_arq_bufs: return ret_code; unwind_alloc_arq_bufs: /* don't try to free the one that failed... */ i--; for (; i >= 0; i--) iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); iavf_free_virt_mem(hw, &hw->aq.arq.dma_head); return ret_code; } /** * iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue * @hw: pointer to the hardware structure **/ static enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw) { struct iavf_dma_mem *bi; enum iavf_status ret_code; int i; /* No mapped memory needed yet, just the buffer info structures */ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head, (hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem))); if (ret_code) goto alloc_asq_bufs; hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va; /* allocate the mapped buffers */ for (i = 0; i < hw->aq.num_asq_entries; i++) { bi = &hw->aq.asq.r.asq_bi[i]; ret_code = iavf_allocate_dma_mem(hw, bi, iavf_mem_asq_buf, hw->aq.asq_buf_size, IAVF_ADMINQ_DESC_ALIGNMENT); if (ret_code) goto unwind_alloc_asq_bufs; } alloc_asq_bufs: return ret_code; unwind_alloc_asq_bufs: /* don't try to free the one that failed... */ i--; for (; i >= 0; i--) iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); iavf_free_virt_mem(hw, &hw->aq.asq.dma_head); return ret_code; } /** * iavf_free_arq_bufs - Free receive queue buffer info elements * @hw: pointer to the hardware structure **/ static void iavf_free_arq_bufs(struct iavf_hw *hw) { int i; /* free descriptors */ for (i = 0; i < hw->aq.num_arq_entries; i++) iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); /* free the descriptor memory */ iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf); /* free the dma header */ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head); } /** * iavf_free_asq_bufs - Free send queue buffer info elements * @hw: pointer to the hardware structure **/ static void iavf_free_asq_bufs(struct iavf_hw *hw) { int i; /* only unmap if the address is non-NULL */ for (i = 0; i < hw->aq.num_asq_entries; i++) if (hw->aq.asq.r.asq_bi[i].pa) iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); /* free the buffer info list */ iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf); /* free the descriptor memory */ iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf); /* free the dma header */ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head); } /** * iavf_config_asq_regs - configure ASQ registers * @hw: pointer to the hardware structure * * Configure base address and length registers for the transmit queue **/ static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw) { enum iavf_status ret_code = 0; u32 reg = 0; /* Clear Head and Tail */ wr32(hw, hw->aq.asq.head, 0); wr32(hw, hw->aq.asq.tail, 0); /* set starting point */ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | IAVF_VF_ATQLEN1_ATQENABLE_MASK)); wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa)); wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa)); /* Check one register to verify that config was applied */ reg = rd32(hw, hw->aq.asq.bal); if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa)) ret_code = 
IAVF_ERR_ADMIN_QUEUE_ERROR; return ret_code; } /** * iavf_config_arq_regs - ARQ register configuration * @hw: pointer to the hardware structure * * Configure base address and length registers for the receive (event queue) **/ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw) { enum iavf_status ret_code = 0; u32 reg = 0; /* Clear Head and Tail */ wr32(hw, hw->aq.arq.head, 0); wr32(hw, hw->aq.arq.tail, 0); /* set starting point */ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | IAVF_VF_ARQLEN1_ARQENABLE_MASK)); wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa)); wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa)); /* Update tail in the HW to post pre-allocated buffers */ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); /* Check one register to verify that config was applied */ reg = rd32(hw, hw->aq.arq.bal); if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa)) ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR; return ret_code; } /** * iavf_init_asq - main initialization routine for ASQ * @hw: pointer to the hardware structure * * This is the main initialization routine for the Admin Send Queue * Prior to calling this function, drivers *MUST* set the following fields * in the hw->aq structure: * - hw->aq.num_asq_entries * - hw->aq.arq_buf_size * * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe **/ static enum iavf_status iavf_init_asq(struct iavf_hw *hw) { enum iavf_status ret_code = 0; int i; if (hw->aq.asq.count > 0) { /* queue already initialized */ ret_code = IAVF_ERR_NOT_READY; goto init_adminq_exit; } /* verify input for valid configuration */ if ((hw->aq.num_asq_entries == 0) || (hw->aq.asq_buf_size == 0)) { ret_code = IAVF_ERR_CONFIG; goto init_adminq_exit; } hw->aq.asq.next_to_use = 0; hw->aq.asq.next_to_clean = 0; /* allocate the ring memory */ ret_code = iavf_alloc_adminq_asq_ring(hw); if (ret_code) goto init_adminq_exit; /* allocate buffers in the rings */ ret_code = iavf_alloc_asq_bufs(hw); if (ret_code) goto init_adminq_free_rings; /* initialize base registers */ ret_code = iavf_config_asq_regs(hw); if (ret_code) goto init_free_asq_bufs; /* success! */ hw->aq.asq.count = hw->aq.num_asq_entries; goto init_adminq_exit; init_free_asq_bufs: for (i = 0; i < hw->aq.num_asq_entries; i++) iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); iavf_free_virt_mem(hw, &hw->aq.asq.dma_head); init_adminq_free_rings: iavf_free_adminq_asq(hw); init_adminq_exit: return ret_code; } /** * iavf_init_arq - initialize ARQ * @hw: pointer to the hardware structure * * The main initialization routine for the Admin Receive (Event) Queue. 
* Prior to calling this function, drivers *MUST* set the following fields * in the hw->aq structure: * - hw->aq.num_asq_entries * - hw->aq.arq_buf_size * * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe **/ static enum iavf_status iavf_init_arq(struct iavf_hw *hw) { enum iavf_status ret_code = 0; int i; if (hw->aq.arq.count > 0) { /* queue already initialized */ ret_code = IAVF_ERR_NOT_READY; goto init_adminq_exit; } /* verify input for valid configuration */ if ((hw->aq.num_arq_entries == 0) || (hw->aq.arq_buf_size == 0)) { ret_code = IAVF_ERR_CONFIG; goto init_adminq_exit; } hw->aq.arq.next_to_use = 0; hw->aq.arq.next_to_clean = 0; /* allocate the ring memory */ ret_code = iavf_alloc_adminq_arq_ring(hw); if (ret_code) goto init_adminq_exit; /* allocate buffers in the rings */ ret_code = iavf_alloc_arq_bufs(hw); if (ret_code) goto init_adminq_free_rings; /* initialize base registers */ ret_code = iavf_config_arq_regs(hw); if (ret_code) goto init_free_arq_bufs; /* success! */ hw->aq.arq.count = hw->aq.num_arq_entries; goto init_adminq_exit; init_free_arq_bufs: for (i = 0; i < hw->aq.num_arq_entries; i++) iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); iavf_free_virt_mem(hw, &hw->aq.arq.dma_head); init_adminq_free_rings: iavf_free_adminq_arq(hw); init_adminq_exit: return ret_code; } /** * iavf_shutdown_asq - shutdown the ASQ * @hw: pointer to the hardware structure * * The main shutdown routine for the Admin Send Queue **/ static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw) { enum iavf_status ret_code = 0; mutex_lock(&hw->aq.asq_mutex); if (hw->aq.asq.count == 0) { ret_code = IAVF_ERR_NOT_READY; goto shutdown_asq_out; } /* Stop firmware AdminQ processing */ wr32(hw, hw->aq.asq.head, 0); wr32(hw, hw->aq.asq.tail, 0); wr32(hw, hw->aq.asq.len, 0); wr32(hw, hw->aq.asq.bal, 0); wr32(hw, hw->aq.asq.bah, 0); hw->aq.asq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers */ iavf_free_asq_bufs(hw); shutdown_asq_out: mutex_unlock(&hw->aq.asq_mutex); return ret_code; } /** * iavf_shutdown_arq - shutdown ARQ * @hw: pointer to the hardware structure * * The main shutdown routine for the Admin Receive Queue **/ static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw) { enum iavf_status ret_code = 0; mutex_lock(&hw->aq.arq_mutex); if (hw->aq.arq.count == 0) { ret_code = IAVF_ERR_NOT_READY; goto shutdown_arq_out; } /* Stop firmware AdminQ processing */ wr32(hw, hw->aq.arq.head, 0); wr32(hw, hw->aq.arq.tail, 0); wr32(hw, hw->aq.arq.len, 0); wr32(hw, hw->aq.arq.bal, 0); wr32(hw, hw->aq.arq.bah, 0); hw->aq.arq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers */ iavf_free_arq_bufs(hw); shutdown_arq_out: mutex_unlock(&hw->aq.arq_mutex); return ret_code; } /** * iavf_init_adminq - main initialization routine for Admin Queue * @hw: pointer to the hardware structure * * Prior to calling this function, drivers *MUST* set the following fields * in the hw->aq structure: * - hw->aq.num_asq_entries * - hw->aq.num_arq_entries * - hw->aq.arq_buf_size * - hw->aq.asq_buf_size **/ enum iavf_status iavf_init_adminq(struct iavf_hw *hw) { enum iavf_status ret_code; /* verify input for valid configuration */ if ((hw->aq.num_arq_entries == 0) || (hw->aq.num_asq_entries == 0) || (hw->aq.arq_buf_size == 0) || (hw->aq.asq_buf_size == 0)) { ret_code = IAVF_ERR_CONFIG; goto init_adminq_exit; } /* Set up register offsets */ iavf_adminq_init_regs(hw); /* setup ASQ command write back timeout */ 
hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT; /* allocate the ASQ */ ret_code = iavf_init_asq(hw); if (ret_code) goto init_adminq_destroy_locks; /* allocate the ARQ */ ret_code = iavf_init_arq(hw); if (ret_code) goto init_adminq_free_asq; /* success! */ goto init_adminq_exit; init_adminq_free_asq: iavf_shutdown_asq(hw); init_adminq_destroy_locks: init_adminq_exit: return ret_code; } /** * iavf_shutdown_adminq - shutdown routine for the Admin Queue * @hw: pointer to the hardware structure **/ enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw) { if (iavf_check_asq_alive(hw)) iavf_aq_queue_shutdown(hw, true); iavf_shutdown_asq(hw); iavf_shutdown_arq(hw); return 0; } /** * iavf_clean_asq - cleans Admin send queue * @hw: pointer to the hardware structure * * returns the number of free desc **/ static u16 iavf_clean_asq(struct iavf_hw *hw) { struct iavf_adminq_ring *asq = &hw->aq.asq; struct iavf_asq_cmd_details *details; u16 ntc = asq->next_to_clean; struct iavf_aq_desc desc_cb; struct iavf_aq_desc *desc; desc = IAVF_ADMINQ_DESC(*asq, ntc); details = IAVF_ADMINQ_DETAILS(*asq, ntc); while (rd32(hw, hw->aq.asq.head) != ntc) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); if (details->callback) { IAVF_ADMINQ_CALLBACK cb_func = (IAVF_ADMINQ_CALLBACK)details->callback; desc_cb = *desc; cb_func(hw, &desc_cb); } memset((void *)desc, 0, sizeof(struct iavf_aq_desc)); memset((void *)details, 0, sizeof(struct iavf_asq_cmd_details)); ntc++; if (ntc == asq->count) ntc = 0; desc = IAVF_ADMINQ_DESC(*asq, ntc); details = IAVF_ADMINQ_DETAILS(*asq, ntc); } asq->next_to_clean = ntc; return IAVF_DESC_UNUSED(asq); } /** * iavf_asq_done - check if FW has processed the Admin Send Queue * @hw: pointer to the hw struct * * Returns true if the firmware has processed all descriptors on the * admin send queue. Returns false if there are still requests pending. **/ bool iavf_asq_done(struct iavf_hw *hw) { /* AQ designers suggest use of head for better * timing reliability than DD bit */ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; } /** * iavf_asq_send_command - send command to Admin Queue * @hw: pointer to the hw struct * @desc: prefilled descriptor describing the command (non DMA mem) * @buff: buffer to use for indirect commands * @buff_size: size of buffer for indirect commands * @cmd_details: pointer to command details structure * * This is the main send command driver routine for the Admin Queue send * queue. It runs the queue, cleans the queue, etc **/ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct iavf_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct iavf_asq_cmd_details *cmd_details) { struct iavf_dma_mem *dma_buff = NULL; struct iavf_asq_cmd_details *details; struct iavf_aq_desc *desc_on_ring; bool cmd_completed = false; enum iavf_status status = 0; u16 retval = 0; u32 val = 0; mutex_lock(&hw->aq.asq_mutex); if (hw->aq.asq.count == 0) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: Admin queue not initialized.\n"); status = IAVF_ERR_QUEUE_EMPTY; goto asq_send_command_error; } hw->aq.asq_last_status = IAVF_AQ_RC_OK; val = rd32(hw, hw->aq.asq.head); if (val >= hw->aq.num_asq_entries) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: head overrun at %d\n", val); status = IAVF_ERR_QUEUE_EMPTY; goto asq_send_command_error; } details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); if (cmd_details) { *details = *cmd_details; /* If the cmd_details are defined copy the cookie. 
The * cpu_to_le32 is not needed here because the data is ignored * by the FW, only used by the driver */ if (details->cookie) { desc->cookie_high = cpu_to_le32(upper_32_bits(details->cookie)); desc->cookie_low = cpu_to_le32(lower_32_bits(details->cookie)); } } else { memset(details, 0, sizeof(struct iavf_asq_cmd_details)); } /* clear requested flags and then set additional flags if defined */ desc->flags &= ~cpu_to_le16(details->flags_dis); desc->flags |= cpu_to_le16(details->flags_ena); if (buff_size > hw->aq.asq_buf_size) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: Invalid buffer size: %d.\n", buff_size); status = IAVF_ERR_INVALID_SIZE; goto asq_send_command_error; } if (details->postpone && !details->async) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: Async flag not set along with postpone flag"); status = IAVF_ERR_PARAM; goto asq_send_command_error; } /* call clean and check queue available function to reclaim the * descriptors that were processed by FW, the function returns the * number of desc available */ /* the clean function called here could be called in a separate thread * in case of asynchronous completions */ if (iavf_clean_asq(hw) == 0) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: Error queue is full.\n"); status = IAVF_ERR_ADMIN_QUEUE_FULL; goto asq_send_command_error; } /* initialize the temp desc pointer with the right desc */ desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); /* if the desc is available copy the temp desc to the right place */ *desc_on_ring = *desc; /* if buff is not NULL assume indirect command */ if (buff) { dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]; /* copy the user buff into the respective DMA buff */ memcpy(dma_buff->va, buff, buff_size); desc_on_ring->datalen = cpu_to_le16(buff_size); /* Update the address values in the desc with the pa value * for respective buffer */ desc_on_ring->params.external.addr_high = cpu_to_le32(upper_32_bits(dma_buff->pa)); desc_on_ring->params.external.addr_low = cpu_to_le32(lower_32_bits(dma_buff->pa)); } /* bump the tail */ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff, buff_size); (hw->aq.asq.next_to_use)++; if (hw->aq.asq.next_to_use == hw->aq.asq.count) hw->aq.asq.next_to_use = 0; if (!details->postpone) wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); /* if cmd_details are not defined or async flag is not set, * we need to wait for desc write back */ if (!details->async && !details->postpone) { u32 total_delay = 0; do { /* AQ designers suggest use of head for better * timing reliability than DD bit */ if (iavf_asq_done(hw)) break; udelay(50); total_delay += 50; } while (total_delay < hw->aq.asq_cmd_timeout); } /* if ready, copy the desc back to temp */ if (iavf_asq_done(hw)) { *desc = *desc_on_ring; if (buff) memcpy(buff, dma_buff->va, buff_size); retval = le16_to_cpu(desc->retval); if (retval != 0) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: Command completed with error 0x%X.\n", retval); /* strip off FW internal code */ retval &= 0xff; } cmd_completed = true; if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK) status = 0; else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY) status = IAVF_ERR_NOT_READY; else status = IAVF_ERR_ADMIN_QUEUE_ERROR; hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval; } iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer writeback:\n"); iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); /* save 
writeback aq if requested */ if (details->wb_desc) *details->wb_desc = *desc_on_ring; /* update the error if time out occurred */ if ((!cmd_completed) && (!details->async && !details->postpone)) { if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: AQ Critical error.\n"); status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR; } else { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: Writeback timeout.\n"); status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT; } } asq_send_command_error: mutex_unlock(&hw->aq.asq_mutex); return status; } /** * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function * @desc: pointer to the temp descriptor (non DMA mem) * @opcode: the opcode can be used to decide which flags to turn off or on * * Fill the desc with default values **/ void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode) { /* zero out the desc */ memset((void *)desc, 0, sizeof(struct iavf_aq_desc)); desc->opcode = cpu_to_le16(opcode); desc->flags = cpu_to_le16(IAVF_AQ_FLAG_SI); } /** * iavf_clean_arq_element * @hw: pointer to the hw struct * @e: event info from the receive descriptor, includes any buffers * @pending: number of events that could be left to process * * This function cleans one Admin Receive Queue element and returns * the contents through e. It can also return how many events are * left to process through 'pending' **/ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw, struct iavf_arq_event_info *e, u16 *pending) { u16 ntc = hw->aq.arq.next_to_clean; struct iavf_aq_desc *desc; enum iavf_status ret_code = 0; struct iavf_dma_mem *bi; u16 desc_idx; u16 datalen; u16 flags; u16 ntu; /* pre-clean the event info */ memset(&e->desc, 0, sizeof(e->desc)); /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); if (hw->aq.arq.count == 0) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: Admin queue not initialized.\n"); ret_code = IAVF_ERR_QUEUE_EMPTY; goto clean_arq_element_err; } /* set next_to_use to head */ ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK; if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK; goto clean_arq_element_out; } /* now clean the next descriptor */ desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc); desc_idx = ntc; hw->aq.arq_last_status = (enum iavf_admin_queue_err)le16_to_cpu(desc->retval); flags = le16_to_cpu(desc->flags); if (flags & IAVF_AQ_FLAG_ERR) { ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR; iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: Event received with error 0x%X.\n", hw->aq.arq_last_status); } e->desc = *desc; datalen = le16_to_cpu(desc->datalen); e->msg_len = min(datalen, e->buf_len); if (e->msg_buf && (e->msg_len != 0)) memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, e->msg_len); iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, hw->aq.arq_buf_size); /* Restore the original datalen and buffer address in the desc, * FW updates datalen to indicate the event message * size */ bi = &hw->aq.arq.r.arq_bi[ntc]; memset((void *)desc, 0, sizeof(struct iavf_aq_desc)); desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF); if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF) desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB); desc->datalen = cpu_to_le16((u16)bi->size); desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa)); desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); /* set tail = the last cleaned 
desc index. */ wr32(hw, hw->aq.arq.tail, ntc); /* ntc is updated to tail + 1 */ ntc++; if (ntc == hw->aq.num_arq_entries) ntc = 0; hw->aq.arq.next_to_clean = ntc; hw->aq.arq.next_to_use = ntu; clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending) *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); clean_arq_element_err: mutex_unlock(&hw->aq.arq_mutex); return ret_code; }
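/* Illustrative sketch (not part of the upstream file): a minimal view of how
 * a caller drives the AdminQ pair implemented above -- fill a descriptor,
 * post it on the send queue, then drain any events from the receive queue.
 * The helper name and the buffer-less direct command are assumptions made
 * for the example only.
 */
static void __maybe_unused iavf_adminq_roundtrip_sketch(struct iavf_hw *hw,
							u16 opcode)
{
	struct iavf_arq_event_info event = {};
	struct iavf_virt_mem buf_mem = {};
	struct iavf_aq_desc desc;
	u16 pending = 0;

	/* build and post a direct (no buffer) command on the ASQ */
	iavf_fill_default_direct_cmd_desc(&desc, opcode);
	if (iavf_asq_send_command(hw, &desc, NULL, 0, NULL))
		return;

	/* borrow the configured ARQ buffer size for the event copy buffer */
	if (iavf_allocate_virt_mem(hw, &buf_mem, hw->aq.arq_buf_size))
		return;
	event.buf_len = hw->aq.arq_buf_size;
	event.msg_buf = buf_mem.va;

	/* drain the ARQ; a non-zero return (e.g. no work) ends the loop */
	do {
		if (iavf_clean_arq_element(hw, &event, &pending))
			break;
		/* event.desc and event.msg_len now describe one message */
	} while (pending);

	iavf_free_virt_mem(hw, &buf_mem);
}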
linux-master
drivers/net/ethernet/intel/iavf/iavf_adminq.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ /* ethtool support for iavf */ #include "iavf.h" #include <linux/uaccess.h> /* ethtool statistics helpers */ /** * struct iavf_stats - definition for an ethtool statistic * @stat_string: statistic name to display in ethtool -S output * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) * @stat_offset: offsetof() the stat from a base pointer * * This structure defines a statistic to be added to the ethtool stats buffer. * It defines a statistic as offset from a common base pointer. Stats should * be defined in constant arrays using the IAVF_STAT macro, with every element * of the array using the same _type for calculating the sizeof_stat and * stat_offset. * * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from * the iavf_add_ethtool_stat() helper function. * * The @stat_string is interpreted as a format string, allowing formatted * values to be inserted while looping over multiple structures for a given * statistics array. Thus, every statistic string in an array should have the * same type and number of format specifiers, to be formatted by variadic * arguments to the iavf_add_stat_string() helper function. **/ struct iavf_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int stat_offset; }; /* Helper macro to define an iavf_stat structure with proper size and type. * Use this when defining constant statistics arrays. Note that @_type expects * only a type name and is used multiple times. */ #define IAVF_STAT(_type, _name, _stat) { \ .stat_string = _name, \ .sizeof_stat = sizeof_field(_type, _stat), \ .stat_offset = offsetof(_type, _stat) \ } /* Helper macro for defining some statistics related to queues */ #define IAVF_QUEUE_STAT(_name, _stat) \ IAVF_STAT(struct iavf_ring, _name, _stat) /* Stats associated with a Tx or Rx ring */ static const struct iavf_stats iavf_gstrings_queue_stats[] = { IAVF_QUEUE_STAT("%s-%u.packets", stats.packets), IAVF_QUEUE_STAT("%s-%u.bytes", stats.bytes), }; /** * iavf_add_one_ethtool_stat - copy the stat into the supplied buffer * @data: location to store the stat value * @pointer: basis for where to copy from * @stat: the stat definition * * Copies the stat data defined by the pointer and stat structure pair into * the memory supplied as data. Used to implement iavf_add_ethtool_stats and * iavf_add_queue_stats. If the pointer is null, data will be zero'd. */ static void iavf_add_one_ethtool_stat(u64 *data, void *pointer, const struct iavf_stats *stat) { char *p; if (!pointer) { /* ensure that the ethtool data buffer is zero'd for any stats * which don't have a valid pointer. */ *data = 0; return; } p = (char *)pointer + stat->stat_offset; switch (stat->sizeof_stat) { case sizeof(u64): *data = *((u64 *)p); break; case sizeof(u32): *data = *((u32 *)p); break; case sizeof(u16): *data = *((u16 *)p); break; case sizeof(u8): *data = *((u8 *)p); break; default: WARN_ONCE(1, "unexpected stat size for %s", stat->stat_string); *data = 0; } } /** * __iavf_add_ethtool_stats - copy stats into the ethtool supplied buffer * @data: ethtool stats buffer * @pointer: location to copy stats from * @stats: array of stats to copy * @size: the size of the stats definition * * Copy the stats defined by the stats array using the pointer as a base into * the data buffer supplied by ethtool. 
Updates the data pointer to point to * the next empty location for successive calls to __iavf_add_ethtool_stats. * If pointer is null, set the data values to zero and update the pointer to * skip these stats. **/ static void __iavf_add_ethtool_stats(u64 **data, void *pointer, const struct iavf_stats stats[], const unsigned int size) { unsigned int i; for (i = 0; i < size; i++) iavf_add_one_ethtool_stat((*data)++, pointer, &stats[i]); } /** * iavf_add_ethtool_stats - copy stats into ethtool supplied buffer * @data: ethtool stats buffer * @pointer: location where stats are stored * @stats: static const array of stat definitions * * Macro to ease the use of __iavf_add_ethtool_stats by taking a static * constant stats array and passing the ARRAY_SIZE(). This avoids typos by * ensuring that we pass the size associated with the given stats array. * * The parameter @stats is evaluated twice, so parameters with side effects * should be avoided. **/ #define iavf_add_ethtool_stats(data, pointer, stats) \ __iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) /** * iavf_add_queue_stats - copy queue statistics into supplied buffer * @data: ethtool stats buffer * @ring: the ring to copy * * Queue statistics must be copied while protected by * u64_stats_fetch_begin, so we can't directly use iavf_add_ethtool_stats. * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the * ring pointer is null, zero out the queue stat values and update the data * pointer. Otherwise safely copy the stats from the ring into the supplied * buffer and update the data pointer when finished. * * This function expects to be called while under rcu_read_lock(). **/ static void iavf_add_queue_stats(u64 **data, struct iavf_ring *ring) { const unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats); const struct iavf_stats *stats = iavf_gstrings_queue_stats; unsigned int start; unsigned int i; /* To avoid invalid statistics values, ensure that we keep retrying * the copy until we get a consistent value according to * u64_stats_fetch_retry. But first, make sure our ring is * non-null before attempting to access its syncp. */ do { start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp); for (i = 0; i < size; i++) iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]); } while (ring && u64_stats_fetch_retry(&ring->syncp, start)); /* Once we successfully copy the stats in, update the data pointer */ *data += size; } /** * __iavf_add_stat_strings - copy stat strings into ethtool buffer * @p: ethtool supplied buffer * @stats: stat definitions array * @size: size of the stats array * * Format and copy the strings described by stats into the buffer pointed at * by p. **/ static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[], const unsigned int size, ...) { unsigned int i; for (i = 0; i < size; i++) { va_list args; va_start(args, size); vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); *p += ETH_GSTRING_LEN; va_end(args); } } /** * iavf_add_stat_strings - copy stat strings into ethtool buffer * @p: ethtool supplied buffer * @stats: stat definitions array * * Format and copy the strings described by the const static stats value into * the buffer pointed at by p. * * The parameter @stats is evaluated twice, so parameters with side effects * should be avoided. Additionally, stats must be an array such that * ARRAY_SIZE can be called on it. **/ #define iavf_add_stat_strings(p, stats, ...) 
\ __iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) #define VF_STAT(_name, _stat) \ IAVF_STAT(struct iavf_adapter, _name, _stat) static const struct iavf_stats iavf_gstrings_stats[] = { VF_STAT("rx_bytes", current_stats.rx_bytes), VF_STAT("rx_unicast", current_stats.rx_unicast), VF_STAT("rx_multicast", current_stats.rx_multicast), VF_STAT("rx_broadcast", current_stats.rx_broadcast), VF_STAT("rx_discards", current_stats.rx_discards), VF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol), VF_STAT("tx_bytes", current_stats.tx_bytes), VF_STAT("tx_unicast", current_stats.tx_unicast), VF_STAT("tx_multicast", current_stats.tx_multicast), VF_STAT("tx_broadcast", current_stats.tx_broadcast), VF_STAT("tx_discards", current_stats.tx_discards), VF_STAT("tx_errors", current_stats.tx_errors), }; #define IAVF_STATS_LEN ARRAY_SIZE(iavf_gstrings_stats) #define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats) /* For now we have one and only one private flag and it is only defined * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead * of leaving all this code sitting around empty we will strip it unless * our one private flag is actually available. */ struct iavf_priv_flags { char flag_string[ETH_GSTRING_LEN]; u32 flag; bool read_only; }; #define IAVF_PRIV_FLAG(_name, _flag, _read_only) { \ .flag_string = _name, \ .flag = _flag, \ .read_only = _read_only, \ } static const struct iavf_priv_flags iavf_gstrings_priv_flags[] = { IAVF_PRIV_FLAG("legacy-rx", IAVF_FLAG_LEGACY_RX, 0), }; #define IAVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(iavf_gstrings_priv_flags) /** * iavf_get_link_ksettings - Get Link Speed and Duplex settings * @netdev: network interface device structure * @cmd: ethtool command * * Reports speed/duplex settings. Because this is a VF, we don't know what * kind of link we really have, so we fake it. **/ static int iavf_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct iavf_adapter *adapter = netdev_priv(netdev); ethtool_link_ksettings_zero_link_mode(cmd, supported); cmd->base.autoneg = AUTONEG_DISABLE; cmd->base.port = PORT_NONE; cmd->base.duplex = DUPLEX_FULL; if (ADV_LINK_SUPPORT(adapter)) { if (adapter->link_speed_mbps && adapter->link_speed_mbps < U32_MAX) cmd->base.speed = adapter->link_speed_mbps; else cmd->base.speed = SPEED_UNKNOWN; return 0; } switch (adapter->link_speed) { case VIRTCHNL_LINK_SPEED_40GB: cmd->base.speed = SPEED_40000; break; case VIRTCHNL_LINK_SPEED_25GB: cmd->base.speed = SPEED_25000; break; case VIRTCHNL_LINK_SPEED_20GB: cmd->base.speed = SPEED_20000; break; case VIRTCHNL_LINK_SPEED_10GB: cmd->base.speed = SPEED_10000; break; case VIRTCHNL_LINK_SPEED_5GB: cmd->base.speed = SPEED_5000; break; case VIRTCHNL_LINK_SPEED_2_5GB: cmd->base.speed = SPEED_2500; break; case VIRTCHNL_LINK_SPEED_1GB: cmd->base.speed = SPEED_1000; break; case VIRTCHNL_LINK_SPEED_100MB: cmd->base.speed = SPEED_100; break; default: break; } return 0; } /** * iavf_get_sset_count - Get length of string set * @netdev: network interface device structure * @sset: id of string set * * Reports size of various string tables. **/ static int iavf_get_sset_count(struct net_device *netdev, int sset) { /* Report the maximum number queues, even if not every queue is * currently configured. Since allocation of queues is in pairs, * use netdev->real_num_tx_queues * 2. The real_num_tx_queues is set * at device creation and never changes. 
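	 * As a purely illustrative example, a VF whose netdev was created with
	 * real_num_tx_queues = 4 reports IAVF_STATS_LEN +
	 * (IAVF_QUEUE_STATS_LEN * 2 * 4) = 12 + 16 = 28 ETH_SS_STATS strings:
	 * the adapter-wide stats plus two stats for each of the four Tx and
	 * four Rx rings.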
*/ if (sset == ETH_SS_STATS) return IAVF_STATS_LEN + (IAVF_QUEUE_STATS_LEN * 2 * netdev->real_num_tx_queues); else if (sset == ETH_SS_PRIV_FLAGS) return IAVF_PRIV_FLAGS_STR_LEN; else return -EINVAL; } /** * iavf_get_ethtool_stats - report device statistics * @netdev: network interface device structure * @stats: ethtool statistics structure * @data: pointer to data buffer * * All statistics are added to the data buffer as an array of u64. **/ static void iavf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct iavf_adapter *adapter = netdev_priv(netdev); unsigned int i; /* Explicitly request stats refresh */ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS); iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats); rcu_read_lock(); /* As num_active_queues describe both tx and rx queues, we can use * it to iterate over rings' stats. */ for (i = 0; i < adapter->num_active_queues; i++) { struct iavf_ring *ring; /* Tx rings stats */ ring = &adapter->tx_rings[i]; iavf_add_queue_stats(&data, ring); /* Rx rings stats */ ring = &adapter->rx_rings[i]; iavf_add_queue_stats(&data, ring); } rcu_read_unlock(); } /** * iavf_get_priv_flag_strings - Get private flag strings * @netdev: network interface device structure * @data: buffer for string data * * Builds the private flags string table **/ static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data) { unsigned int i; for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) { snprintf(data, ETH_GSTRING_LEN, "%s", iavf_gstrings_priv_flags[i].flag_string); data += ETH_GSTRING_LEN; } } /** * iavf_get_stat_strings - Get stat strings * @netdev: network interface device structure * @data: buffer for string data * * Builds the statistics string table **/ static void iavf_get_stat_strings(struct net_device *netdev, u8 *data) { unsigned int i; iavf_add_stat_strings(&data, iavf_gstrings_stats); /* Queues are always allocated in pairs, so we just use * real_num_tx_queues for both Tx and Rx queues. */ for (i = 0; i < netdev->real_num_tx_queues; i++) { iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, "tx", i); iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, "rx", i); } } /** * iavf_get_strings - Get string set * @netdev: network interface device structure * @sset: id of string set * @data: buffer for string data * * Builds string tables for various string sets **/ static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data) { switch (sset) { case ETH_SS_STATS: iavf_get_stat_strings(netdev, data); break; case ETH_SS_PRIV_FLAGS: iavf_get_priv_flag_strings(netdev, data); break; default: break; } } /** * iavf_get_priv_flags - report device private flags * @netdev: network interface device structure * * The get string set count and the string set should be matched for each * flag returned. Add new strings for each flag to the iavf_gstrings_priv_flags * array. * * Returns a u32 bitmap of flags. 
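 * Bit i of the returned bitmap corresponds to entry i of
 * iavf_gstrings_priv_flags, so with the current single-entry table bit 0
 * simply reports whether IAVF_FLAG_LEGACY_RX is set in adapter->flags.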
 **/
static u32 iavf_get_priv_flags(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u32 i, ret_flags = 0;

	for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
		const struct iavf_priv_flags *priv_flags;

		priv_flags = &iavf_gstrings_priv_flags[i];

		if (priv_flags->flag & adapter->flags)
			ret_flags |= BIT(i);
	}

	return ret_flags;
}

/**
 * iavf_set_priv_flags - set private flags
 * @netdev: network interface device structure
 * @flags: bit flags to be set
 **/
static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u32 orig_flags, new_flags, changed_flags;
	int ret = 0;
	u32 i;

	orig_flags = READ_ONCE(adapter->flags);
	new_flags = orig_flags;

	for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
		const struct iavf_priv_flags *priv_flags;

		priv_flags = &iavf_gstrings_priv_flags[i];

		if (flags & BIT(i))
			new_flags |= priv_flags->flag;
		else
			new_flags &= ~(priv_flags->flag);

		if (priv_flags->read_only &&
		    ((orig_flags ^ new_flags) & ~BIT(i)))
			return -EOPNOTSUPP;
	}

	/* Before we finalize any flag changes, any checks which we need to
	 * perform to determine if the new flags will be supported should go
	 * here...
	 */

	/* Compare and exchange the new flags into place. If we failed, that
	 * is if cmpxchg returns anything but the old value, this means
	 * something else must have modified the flags variable since we
	 * copied it. We'll just punt with an error and log something in the
	 * message buffer.
	 */
	if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) {
		dev_warn(&adapter->pdev->dev,
			 "Unable to update adapter->flags as it was modified by another thread...\n");
		return -EAGAIN;
	}

	changed_flags = orig_flags ^ new_flags;

	/* Process any additional changes needed as a result of flag changes.
	 * The changed_flags value reflects the list of bits that were changed
	 * in the code above.
	 */

	/* issue a reset to force legacy-rx change to take effect */
	if (changed_flags & IAVF_FLAG_LEGACY_RX) {
		if (netif_running(netdev)) {
			iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
			ret = iavf_wait_for_reset(adapter);
			if (ret)
				netdev_warn(netdev, "Changing private flags timeout or interrupted waiting for reset");
		}
	}

	return ret;
}

/**
 * iavf_get_msglevel - Get debug message level
 * @netdev: network interface device structure
 *
 * Returns current debug message level.
 **/
static u32 iavf_get_msglevel(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

/**
 * iavf_set_msglevel - Set debug message level
 * @netdev: network interface device structure
 * @data: message level
 *
 * Set current debug message level. Higher values cause the driver to
 * be noisier.
 **/
static void iavf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (IAVF_DEBUG_USER & data)
		adapter->hw.debug_mask = data;
	adapter->msg_enable = data;
}

/**
 * iavf_get_drvinfo - Get driver info
 * @netdev: network interface device structure
 * @drvinfo: ethtool driver info structure
 *
 * Returns information about the driver and device for display to the user.
**/ static void iavf_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct iavf_adapter *adapter = netdev_priv(netdev); strscpy(drvinfo->driver, iavf_driver_name, 32); strscpy(drvinfo->fw_version, "N/A", 4); strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN; } /** * iavf_get_ringparam - Get ring parameters * @netdev: network interface device structure * @ring: ethtool ringparam structure * @kernel_ring: ethtool extenal ringparam structure * @extack: netlink extended ACK report struct * * Returns current ring parameters. TX and RX rings are reported separately, * but the number of rings is not reported. **/ static void iavf_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct iavf_adapter *adapter = netdev_priv(netdev); ring->rx_max_pending = IAVF_MAX_RXD; ring->tx_max_pending = IAVF_MAX_TXD; ring->rx_pending = adapter->rx_desc_count; ring->tx_pending = adapter->tx_desc_count; } /** * iavf_set_ringparam - Set ring parameters * @netdev: network interface device structure * @ring: ethtool ringparam structure * @kernel_ring: ethtool external ringparam structure * @extack: netlink extended ACK report struct * * Sets ring parameters. TX and RX rings are controlled separately, but the * number of rings is not specified, so all rings get the same settings. **/ static int iavf_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct iavf_adapter *adapter = netdev_priv(netdev); u32 new_rx_count, new_tx_count; int ret = 0; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; if (ring->tx_pending > IAVF_MAX_TXD || ring->tx_pending < IAVF_MIN_TXD || ring->rx_pending > IAVF_MAX_RXD || ring->rx_pending < IAVF_MIN_RXD) { netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n", ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD, IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE); return -EINVAL; } new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE); if (new_tx_count != ring->tx_pending) netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n", new_tx_count); new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE); if (new_rx_count != ring->rx_pending) netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n", new_rx_count); /* if nothing to do return success */ if ((new_tx_count == adapter->tx_desc_count) && (new_rx_count == adapter->rx_desc_count)) { netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n"); return 0; } if (new_tx_count != adapter->tx_desc_count) { netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n", adapter->tx_desc_count, new_tx_count); adapter->tx_desc_count = new_tx_count; } if (new_rx_count != adapter->rx_desc_count) { netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n", adapter->rx_desc_count, new_rx_count); adapter->rx_desc_count = new_rx_count; } if (netif_running(netdev)) { iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); ret = iavf_wait_for_reset(adapter); if (ret) netdev_warn(netdev, "Changing ring parameters timeout or interrupted waiting for reset"); } return ret; } /** * __iavf_get_coalesce - get per-queue coalesce settings * @netdev: the netdev to check * @ec: ethtool coalesce data structure * @queue: which queue to pick * 
* Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs * are per queue. If queue is <0 then we default to queue 0 as the * representative value. **/ static int __iavf_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, int queue) { struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_ring *rx_ring, *tx_ring; /* Rx and Tx usecs per queue value. If user doesn't specify the * queue, return queue 0's value to represent. */ if (queue < 0) queue = 0; else if (queue >= adapter->num_active_queues) return -EINVAL; rx_ring = &adapter->rx_rings[queue]; tx_ring = &adapter->tx_rings[queue]; if (ITR_IS_DYNAMIC(rx_ring->itr_setting)) ec->use_adaptive_rx_coalesce = 1; if (ITR_IS_DYNAMIC(tx_ring->itr_setting)) ec->use_adaptive_tx_coalesce = 1; ec->rx_coalesce_usecs = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; ec->tx_coalesce_usecs = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; return 0; } /** * iavf_get_coalesce - Get interrupt coalescing settings * @netdev: network interface device structure * @ec: ethtool coalesce structure * @kernel_coal: ethtool CQE mode setting structure * @extack: extack for reporting error messages * * Returns current coalescing settings. This is referred to elsewhere in the * driver as Interrupt Throttle Rate, as this is how the hardware describes * this functionality. Note that if per-queue settings have been modified this * only represents the settings of queue 0. **/ static int iavf_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { return __iavf_get_coalesce(netdev, ec, -1); } /** * iavf_get_per_queue_coalesce - get coalesce values for specific queue * @netdev: netdev to read * @ec: coalesce settings from ethtool * @queue: the queue to read * * Read specific queue's coalesce settings. **/ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue, struct ethtool_coalesce *ec) { return __iavf_get_coalesce(netdev, ec, queue); } /** * iavf_set_itr_per_queue - set ITR values for specific queue * @adapter: the VF adapter struct to set values for * @ec: coalesce settings from ethtool * @queue: the queue to modify * * Change the ITR settings for a specific queue. 
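 * The requested usecs values are aligned with ITR_REG_ALIGN() and stored in
 * the ring's itr_setting together with the IAVF_ITR_DYNAMIC bit, which is set
 * or cleared according to ec->use_adaptive_rx_coalesce and
 * ec->use_adaptive_tx_coalesce; the interrupt handler later programs the
 * hardware from the q_vector target values derived here.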
**/ static int iavf_set_itr_per_queue(struct iavf_adapter *adapter, struct ethtool_coalesce *ec, int queue) { struct iavf_ring *rx_ring = &adapter->rx_rings[queue]; struct iavf_ring *tx_ring = &adapter->tx_rings[queue]; struct iavf_q_vector *q_vector; u16 itr_setting; itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; if (ec->rx_coalesce_usecs != itr_setting && ec->use_adaptive_rx_coalesce) { netif_info(adapter, drv, adapter->netdev, "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n"); return -EINVAL; } itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; if (ec->tx_coalesce_usecs != itr_setting && ec->use_adaptive_tx_coalesce) { netif_info(adapter, drv, adapter->netdev, "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n"); return -EINVAL; } rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); rx_ring->itr_setting |= IAVF_ITR_DYNAMIC; if (!ec->use_adaptive_rx_coalesce) rx_ring->itr_setting ^= IAVF_ITR_DYNAMIC; tx_ring->itr_setting |= IAVF_ITR_DYNAMIC; if (!ec->use_adaptive_tx_coalesce) tx_ring->itr_setting ^= IAVF_ITR_DYNAMIC; q_vector = rx_ring->q_vector; q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); q_vector = tx_ring->q_vector; q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); /* The interrupt handler itself will take care of programming * the Tx and Rx ITR values based on the values we have entered * into the q_vector, no need to write the values now. */ return 0; } /** * __iavf_set_coalesce - set coalesce settings for particular queue * @netdev: the netdev to change * @ec: ethtool coalesce settings * @queue: the queue to change * * Sets the coalesce settings for a particular queue. **/ static int __iavf_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, int queue) { struct iavf_adapter *adapter = netdev_priv(netdev); int i; if (ec->rx_coalesce_usecs == 0) { if (ec->use_adaptive_rx_coalesce) netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); } else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) || (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) { netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); return -EINVAL; } else if (ec->tx_coalesce_usecs == 0) { if (ec->use_adaptive_tx_coalesce) netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); } else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) || (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) { netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); return -EINVAL; } /* Rx and Tx usecs has per queue value. If user doesn't specify the * queue, apply to all queues. */ if (queue < 0) { for (i = 0; i < adapter->num_active_queues; i++) if (iavf_set_itr_per_queue(adapter, ec, i)) return -EINVAL; } else if (queue < adapter->num_active_queues) { if (iavf_set_itr_per_queue(adapter, ec, queue)) return -EINVAL; } else { netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", adapter->num_active_queues - 1); return -EINVAL; } return 0; } /** * iavf_set_coalesce - Set interrupt coalescing settings * @netdev: network interface device structure * @ec: ethtool coalesce structure * @kernel_coal: ethtool CQE mode setting structure * @extack: extack for reporting error messages * * Change current coalescing settings for every queue. 
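 * From user space this is typically reached by a request along the lines of
 * "ethtool -C <iface> rx-usecs 50 adaptive-rx off" (values are illustrative
 * only); ethtool fills struct ethtool_coalesce and __iavf_set_coalesce()
 * then applies it to every active queue.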
**/ static int iavf_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { return __iavf_set_coalesce(netdev, ec, -1); } /** * iavf_set_per_queue_coalesce - set specific queue's coalesce settings * @netdev: the netdev to change * @ec: ethtool's coalesce settings * @queue: the queue to modify * * Modifies a specific queue's coalesce settings. */ static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue, struct ethtool_coalesce *ec) { return __iavf_set_coalesce(netdev, ec, queue); } /** * iavf_fltr_to_ethtool_flow - convert filter type values to ethtool * flow type values * @flow: filter type to be converted * * Returns the corresponding ethtool flow type. */ static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow) { switch (flow) { case IAVF_FDIR_FLOW_IPV4_TCP: return TCP_V4_FLOW; case IAVF_FDIR_FLOW_IPV4_UDP: return UDP_V4_FLOW; case IAVF_FDIR_FLOW_IPV4_SCTP: return SCTP_V4_FLOW; case IAVF_FDIR_FLOW_IPV4_AH: return AH_V4_FLOW; case IAVF_FDIR_FLOW_IPV4_ESP: return ESP_V4_FLOW; case IAVF_FDIR_FLOW_IPV4_OTHER: return IPV4_USER_FLOW; case IAVF_FDIR_FLOW_IPV6_TCP: return TCP_V6_FLOW; case IAVF_FDIR_FLOW_IPV6_UDP: return UDP_V6_FLOW; case IAVF_FDIR_FLOW_IPV6_SCTP: return SCTP_V6_FLOW; case IAVF_FDIR_FLOW_IPV6_AH: return AH_V6_FLOW; case IAVF_FDIR_FLOW_IPV6_ESP: return ESP_V6_FLOW; case IAVF_FDIR_FLOW_IPV6_OTHER: return IPV6_USER_FLOW; case IAVF_FDIR_FLOW_NON_IP_L2: return ETHER_FLOW; default: /* 0 is undefined ethtool flow */ return 0; } } /** * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum * @eth: Ethtool flow type to be converted * * Returns flow enum */ static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth) { switch (eth) { case TCP_V4_FLOW: return IAVF_FDIR_FLOW_IPV4_TCP; case UDP_V4_FLOW: return IAVF_FDIR_FLOW_IPV4_UDP; case SCTP_V4_FLOW: return IAVF_FDIR_FLOW_IPV4_SCTP; case AH_V4_FLOW: return IAVF_FDIR_FLOW_IPV4_AH; case ESP_V4_FLOW: return IAVF_FDIR_FLOW_IPV4_ESP; case IPV4_USER_FLOW: return IAVF_FDIR_FLOW_IPV4_OTHER; case TCP_V6_FLOW: return IAVF_FDIR_FLOW_IPV6_TCP; case UDP_V6_FLOW: return IAVF_FDIR_FLOW_IPV6_UDP; case SCTP_V6_FLOW: return IAVF_FDIR_FLOW_IPV6_SCTP; case AH_V6_FLOW: return IAVF_FDIR_FLOW_IPV6_AH; case ESP_V6_FLOW: return IAVF_FDIR_FLOW_IPV6_ESP; case IPV6_USER_FLOW: return IAVF_FDIR_FLOW_IPV6_OTHER; case ETHER_FLOW: return IAVF_FDIR_FLOW_NON_IP_L2; default: return IAVF_FDIR_FLOW_NONE; } } /** * iavf_is_mask_valid - check mask field set * @mask: full mask to check * @field: field for which mask should be valid * * If the mask is fully set return true. If it is not valid for field return * false. 
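 * For instance, a user-defined data mask of 0xFFFFFFFF passes against
 * IAVF_USERDEF_FLEX_FLTR_M (GENMASK(31, 0)), while a partial mask such as
 * 0x0000FFFF does not (example values only).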
*/ static bool iavf_is_mask_valid(u64 mask, u64 field) { return (mask & field) == field; } /** * iavf_parse_rx_flow_user_data - deconstruct user-defined data * @fsp: pointer to ethtool Rx flow specification * @fltr: pointer to Flow Director filter for userdef data storage * * Returns 0 on success, negative error value on failure */ static int iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, struct iavf_fdir_fltr *fltr) { struct iavf_flex_word *flex; int i, cnt = 0; if (!(fsp->flow_type & FLOW_EXT)) return 0; for (i = 0; i < IAVF_FLEX_WORD_NUM; i++) { #define IAVF_USERDEF_FLEX_WORD_M GENMASK(15, 0) #define IAVF_USERDEF_FLEX_OFFS_S 16 #define IAVF_USERDEF_FLEX_OFFS_M GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S) #define IAVF_USERDEF_FLEX_FLTR_M GENMASK(31, 0) u32 value = be32_to_cpu(fsp->h_ext.data[i]); u32 mask = be32_to_cpu(fsp->m_ext.data[i]); if (!value || !mask) continue; if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M)) return -EINVAL; /* 504 is the maximum value for offsets, and offset is measured * from the start of the MAC address. */ #define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504 flex = &fltr->flex_words[cnt++]; flex->word = value & IAVF_USERDEF_FLEX_WORD_M; flex->offset = (value & IAVF_USERDEF_FLEX_OFFS_M) >> IAVF_USERDEF_FLEX_OFFS_S; if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL) return -EINVAL; } fltr->flex_cnt = cnt; return 0; } /** * iavf_fill_rx_flow_ext_data - fill the additional data * @fsp: pointer to ethtool Rx flow specification * @fltr: pointer to Flow Director filter to get additional data */ static void iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp, struct iavf_fdir_fltr *fltr) { if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1]) return; fsp->flow_type |= FLOW_EXT; memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data)); memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data)); } /** * iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data * @adapter: the VF adapter structure that contains filter list * @cmd: ethtool command data structure to receive the filter data * * Returns 0 as expected for success by ethtool */ static int iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd) { struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; struct iavf_fdir_fltr *rule = NULL; int ret = 0; if (!FDIR_FLTR_SUPPORT(adapter)) return -EOPNOTSUPP; spin_lock_bh(&adapter->fdir_fltr_lock); rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location); if (!rule) { ret = -EINVAL; goto release_lock; } fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type); memset(&fsp->m_u, 0, sizeof(fsp->m_u)); memset(&fsp->m_ext, 0, sizeof(fsp->m_ext)); switch (fsp->flow_type) { case TCP_V4_FLOW: case UDP_V4_FLOW: case SCTP_V4_FLOW: fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip; fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip; fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port; fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port; fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos; fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip; fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip; fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port; fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port; fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos; break; case AH_V4_FLOW: case ESP_V4_FLOW: fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip; fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip; 
fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi; fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos; fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip; fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip; fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi; fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos; break; case IPV4_USER_FLOW: fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip; fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip; fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header; fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos; fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto; fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip; fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip; fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header; fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos; fsp->m_u.usr_ip4_spec.ip_ver = 0xFF; fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto; break; case TCP_V6_FLOW: case UDP_V6_FLOW: case SCTP_V6_FLOW: memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip, sizeof(struct in6_addr)); memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip, sizeof(struct in6_addr)); fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port; fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port; fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass; memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip, sizeof(struct in6_addr)); memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip, sizeof(struct in6_addr)); fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port; fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port; fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass; break; case AH_V6_FLOW: case ESP_V6_FLOW: memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip, sizeof(struct in6_addr)); memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip, sizeof(struct in6_addr)); fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi; fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass; memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip, sizeof(struct in6_addr)); memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip, sizeof(struct in6_addr)); fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi; fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass; break; case IPV6_USER_FLOW: memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip, sizeof(struct in6_addr)); memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip, sizeof(struct in6_addr)); fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header; fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass; fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto; memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip, sizeof(struct in6_addr)); memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip, sizeof(struct in6_addr)); fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header; fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass; fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto; break; case ETHER_FLOW: fsp->h_u.ether_spec.h_proto = rule->eth_data.etype; fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype; break; default: ret = -EINVAL; break; } iavf_fill_rx_flow_ext_data(fsp, rule); if (rule->action == VIRTCHNL_ACTION_DROP) fsp->ring_cookie = RX_CLS_FLOW_DISC; else fsp->ring_cookie = rule->q_index; release_lock: spin_unlock_bh(&adapter->fdir_fltr_lock); return ret; } /** * iavf_get_fdir_fltr_ids - fill buffer with filter IDs of 
active filters * @adapter: the VF adapter structure containing the filter list * @cmd: ethtool command data structure * @rule_locs: ethtool array passed in from OS to receive filter IDs * * Returns 0 as expected for success by ethtool */ static int iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct iavf_fdir_fltr *fltr; unsigned int cnt = 0; int val = 0; if (!FDIR_FLTR_SUPPORT(adapter)) return -EOPNOTSUPP; cmd->data = IAVF_MAX_FDIR_FILTERS; spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry(fltr, &adapter->fdir_list_head, list) { if (cnt == cmd->rule_cnt) { val = -EMSGSIZE; goto release_lock; } rule_locs[cnt] = fltr->loc; cnt++; } release_lock: spin_unlock_bh(&adapter->fdir_fltr_lock); if (!val) cmd->rule_cnt = cnt; return val; } /** * iavf_add_fdir_fltr_info - Set the input set for Flow Director filter * @adapter: pointer to the VF adapter structure * @fsp: pointer to ethtool Rx flow specification * @fltr: filter structure */ static int iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp, struct iavf_fdir_fltr *fltr) { u32 flow_type, q_index = 0; enum virtchnl_action act; int err; if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { act = VIRTCHNL_ACTION_DROP; } else { q_index = fsp->ring_cookie; if (q_index >= adapter->num_active_queues) return -EINVAL; act = VIRTCHNL_ACTION_QUEUE; } fltr->action = act; fltr->loc = fsp->location; fltr->q_index = q_index; if (fsp->flow_type & FLOW_EXT) { memcpy(fltr->ext_data.usr_def, fsp->h_ext.data, sizeof(fltr->ext_data.usr_def)); memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data, sizeof(fltr->ext_mask.usr_def)); } flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type); switch (flow_type) { case TCP_V4_FLOW: case UDP_V4_FLOW: case SCTP_V4_FLOW: fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src; fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst; fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc; fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst; fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos; fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src; fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst; fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc; fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst; fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos; fltr->ip_ver = 4; break; case AH_V4_FLOW: case ESP_V4_FLOW: fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src; fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst; fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi; fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos; fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src; fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst; fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi; fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos; fltr->ip_ver = 4; break; case IPV4_USER_FLOW: fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src; fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst; fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes; fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos; fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto; fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src; fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst; fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes; fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos; fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto; fltr->ip_ver = 4; break; case 
TCP_V6_FLOW: case UDP_V6_FLOW: case SCTP_V6_FLOW: memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src, sizeof(struct in6_addr)); memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst, sizeof(struct in6_addr)); fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc; fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst; fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass; memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src, sizeof(struct in6_addr)); memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst, sizeof(struct in6_addr)); fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc; fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst; fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass; fltr->ip_ver = 6; break; case AH_V6_FLOW: case ESP_V6_FLOW: memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.ah_ip6_spec.ip6src, sizeof(struct in6_addr)); memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst, sizeof(struct in6_addr)); fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi; fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass; memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src, sizeof(struct in6_addr)); memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst, sizeof(struct in6_addr)); fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi; fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass; fltr->ip_ver = 6; break; case IPV6_USER_FLOW: memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src, sizeof(struct in6_addr)); memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst, sizeof(struct in6_addr)); fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes; fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass; fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto; memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src, sizeof(struct in6_addr)); memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst, sizeof(struct in6_addr)); fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes; fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass; fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto; fltr->ip_ver = 6; break; case ETHER_FLOW: fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto; fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto; break; default: /* not doing un-parsed flow types */ return -EINVAL; } err = iavf_validate_fdir_fltr_masks(adapter, fltr); if (err) return err; if (iavf_fdir_is_dup_fltr(adapter, fltr)) return -EEXIST; err = iavf_parse_rx_flow_user_data(fsp, fltr); if (err) return err; return iavf_fill_fdir_add_msg(adapter, fltr); } /** * iavf_add_fdir_ethtool - add Flow Director filter * @adapter: pointer to the VF adapter structure * @cmd: command to add Flow Director filter * * Returns 0 on success and negative values for failure */ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd) { struct ethtool_rx_flow_spec *fsp = &cmd->fs; struct iavf_fdir_fltr *fltr; int count = 50; int err; if (!FDIR_FLTR_SUPPORT(adapter)) return -EOPNOTSUPP; if (fsp->flow_type & FLOW_MAC_EXT) return -EINVAL; spin_lock_bh(&adapter->fdir_fltr_lock); if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) { spin_unlock_bh(&adapter->fdir_fltr_lock); dev_err(&adapter->pdev->dev, "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n", IAVF_MAX_FDIR_FILTERS); return -ENOSPC; } if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) { dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already 
exists\n"); spin_unlock_bh(&adapter->fdir_fltr_lock); return -EEXIST; } spin_unlock_bh(&adapter->fdir_fltr_lock); fltr = kzalloc(sizeof(*fltr), GFP_KERNEL); if (!fltr) return -ENOMEM; while (!mutex_trylock(&adapter->crit_lock)) { if (--count == 0) { kfree(fltr); return -EINVAL; } udelay(1); } err = iavf_add_fdir_fltr_info(adapter, fsp, fltr); if (err) goto ret; spin_lock_bh(&adapter->fdir_fltr_lock); iavf_fdir_list_add_fltr(adapter, fltr); adapter->fdir_active_fltr++; fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST; adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; spin_unlock_bh(&adapter->fdir_fltr_lock); mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); ret: if (err && fltr) kfree(fltr); mutex_unlock(&adapter->crit_lock); return err; } /** * iavf_del_fdir_ethtool - delete Flow Director filter * @adapter: pointer to the VF adapter structure * @cmd: command to delete Flow Director filter * * Returns 0 on success and negative values for failure */ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd) { struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; struct iavf_fdir_fltr *fltr = NULL; int err = 0; if (!FDIR_FLTR_SUPPORT(adapter)) return -EOPNOTSUPP; spin_lock_bh(&adapter->fdir_fltr_lock); fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location); if (fltr) { if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) { fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST; adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; } else { err = -EBUSY; } } else if (adapter->fdir_active_fltr) { err = -EINVAL; } spin_unlock_bh(&adapter->fdir_fltr_lock); if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST) mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); return err; } /** * iavf_adv_rss_parse_hdrs - parses headers from RSS hash input * @cmd: ethtool rxnfc command * * This function parses the rxnfc command and returns intended * header types for RSS configuration */ static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd) { u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE; switch (cmd->flow_type) { case TCP_V4_FLOW: hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP | IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; break; case UDP_V4_FLOW: hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; break; case SCTP_V4_FLOW: hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP | IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; break; case TCP_V6_FLOW: hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP | IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; break; case UDP_V6_FLOW: hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; break; case SCTP_V6_FLOW: hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP | IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; break; default: break; } return hdrs; } /** * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input * @cmd: ethtool rxnfc command * * This function parses the rxnfc command and returns intended hash fields for * RSS configuration */ static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd) { u64 hfld = IAVF_ADV_RSS_HASH_INVALID; if (cmd->data & RXH_IP_SRC || cmd->data & RXH_IP_DST) { switch (cmd->flow_type) { case TCP_V4_FLOW: case UDP_V4_FLOW: case SCTP_V4_FLOW: if (cmd->data & RXH_IP_SRC) hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA; if (cmd->data & RXH_IP_DST) hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA; break; case TCP_V6_FLOW: case UDP_V6_FLOW: case SCTP_V6_FLOW: if (cmd->data & RXH_IP_SRC) hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA; if (cmd->data & RXH_IP_DST) hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_DA; break; default: break; } } if (cmd->data & RXH_L4_B_0_1 || cmd->data & 
RXH_L4_B_2_3) { switch (cmd->flow_type) { case TCP_V4_FLOW: case TCP_V6_FLOW: if (cmd->data & RXH_L4_B_0_1) hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT; if (cmd->data & RXH_L4_B_2_3) hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT; break; case UDP_V4_FLOW: case UDP_V6_FLOW: if (cmd->data & RXH_L4_B_0_1) hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT; if (cmd->data & RXH_L4_B_2_3) hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT; break; case SCTP_V4_FLOW: case SCTP_V6_FLOW: if (cmd->data & RXH_L4_B_0_1) hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT; if (cmd->data & RXH_L4_B_2_3) hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT; break; default: break; } } return hfld; } /** * iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash * @adapter: pointer to the VF adapter structure * @cmd: ethtool rxnfc command * * Returns Success if the flow input set is supported. */ static int iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd) { struct iavf_adv_rss *rss_old, *rss_new; bool rss_new_add = false; int count = 50, err = 0; u64 hash_flds; u32 hdrs; if (!ADV_RSS_SUPPORT(adapter)) return -EOPNOTSUPP; hdrs = iavf_adv_rss_parse_hdrs(cmd); if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE) return -EINVAL; hash_flds = iavf_adv_rss_parse_hash_flds(cmd); if (hash_flds == IAVF_ADV_RSS_HASH_INVALID) return -EINVAL; rss_new = kzalloc(sizeof(*rss_new), GFP_KERNEL); if (!rss_new) return -ENOMEM; if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds)) { kfree(rss_new); return -EINVAL; } while (!mutex_trylock(&adapter->crit_lock)) { if (--count == 0) { kfree(rss_new); return -EINVAL; } udelay(1); } spin_lock_bh(&adapter->adv_rss_lock); rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs); if (rss_old) { if (rss_old->state != IAVF_ADV_RSS_ACTIVE) { err = -EBUSY; } else if (rss_old->hash_flds != hash_flds) { rss_old->state = IAVF_ADV_RSS_ADD_REQUEST; rss_old->hash_flds = hash_flds; memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg, sizeof(rss_new->cfg_msg)); adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; } else { err = -EEXIST; } } else { rss_new_add = true; rss_new->state = IAVF_ADV_RSS_ADD_REQUEST; rss_new->packet_hdrs = hdrs; rss_new->hash_flds = hash_flds; list_add_tail(&rss_new->list, &adapter->adv_rss_list_head); adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG; } spin_unlock_bh(&adapter->adv_rss_lock); if (!err) mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); mutex_unlock(&adapter->crit_lock); if (!rss_new_add) kfree(rss_new); return err; } /** * iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type * @adapter: pointer to the VF adapter structure * @cmd: ethtool rxnfc command * * Returns Success if the flow input set is supported. 
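 * This is the inverse of iavf_set_adv_rss_hash_opt(): the stored
 * IAVF_ADV_RSS_HASH_FLD_* bits for the matching header set are translated
 * back into the RXH_IP_SRC, RXH_IP_DST, RXH_L4_B_0_1 and RXH_L4_B_2_3 bits
 * reported in cmd->data.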
*/ static int iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd) { struct iavf_adv_rss *rss; u64 hash_flds; u32 hdrs; if (!ADV_RSS_SUPPORT(adapter)) return -EOPNOTSUPP; cmd->data = 0; hdrs = iavf_adv_rss_parse_hdrs(cmd); if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE) return -EINVAL; spin_lock_bh(&adapter->adv_rss_lock); rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs); if (rss) hash_flds = rss->hash_flds; else hash_flds = IAVF_ADV_RSS_HASH_INVALID; spin_unlock_bh(&adapter->adv_rss_lock); if (hash_flds == IAVF_ADV_RSS_HASH_INVALID) return -EINVAL; if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA | IAVF_ADV_RSS_HASH_FLD_IPV6_SA)) cmd->data |= (u64)RXH_IP_SRC; if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA | IAVF_ADV_RSS_HASH_FLD_IPV6_DA)) cmd->data |= (u64)RXH_IP_DST; if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT | IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT | IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT)) cmd->data |= (u64)RXH_L4_B_0_1; if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT | IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT | IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT)) cmd->data |= (u64)RXH_L4_B_2_3; return 0; } /** * iavf_set_rxnfc - command to set Rx flow rules. * @netdev: network interface device structure * @cmd: ethtool rxnfc command * * Returns 0 for success and negative values for errors */ static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) { struct iavf_adapter *adapter = netdev_priv(netdev); int ret = -EOPNOTSUPP; switch (cmd->cmd) { case ETHTOOL_SRXCLSRLINS: ret = iavf_add_fdir_ethtool(adapter, cmd); break; case ETHTOOL_SRXCLSRLDEL: ret = iavf_del_fdir_ethtool(adapter, cmd); break; case ETHTOOL_SRXFH: ret = iavf_set_adv_rss_hash_opt(adapter, cmd); break; default: break; } return ret; } /** * iavf_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command * @rule_locs: pointer to store rule locations * * Returns Success if the command is supported. **/ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct iavf_adapter *adapter = netdev_priv(netdev); int ret = -EOPNOTSUPP; switch (cmd->cmd) { case ETHTOOL_GRXRINGS: cmd->data = adapter->num_active_queues; ret = 0; break; case ETHTOOL_GRXCLSRLCNT: if (!FDIR_FLTR_SUPPORT(adapter)) break; spin_lock_bh(&adapter->fdir_fltr_lock); cmd->rule_cnt = adapter->fdir_active_fltr; spin_unlock_bh(&adapter->fdir_fltr_lock); cmd->data = IAVF_MAX_FDIR_FILTERS; ret = 0; break; case ETHTOOL_GRXCLSRULE: ret = iavf_get_ethtool_fdir_entry(adapter, cmd); break; case ETHTOOL_GRXCLSRLALL: ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs); break; case ETHTOOL_GRXFH: ret = iavf_get_adv_rss_hash_opt(adapter, cmd); break; default: break; } return ret; } /** * iavf_get_channels: get the number of channels supported by the device * @netdev: network interface device structure * @ch: channel information structure * * For the purposes of our device, we only use combined channels, i.e. a tx/rx * queue pair. Report one extra channel to match our "other" MSI-X vector. 
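 * As a hypothetical example, a VF granted 8 queue pairs but currently running
 * 4 active queues would report max_combined = 8, combined_count = 4, and
 * max_other = other_count = NONQ_VECS.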
**/ static void iavf_get_channels(struct net_device *netdev, struct ethtool_channels *ch) { struct iavf_adapter *adapter = netdev_priv(netdev); /* Report maximum channels */ ch->max_combined = adapter->vsi_res->num_queue_pairs; ch->max_other = NONQ_VECS; ch->other_count = NONQ_VECS; ch->combined_count = adapter->num_active_queues; } /** * iavf_set_channels: set the new channel count * @netdev: network interface device structure * @ch: channel information structure * * Negotiate a new number of channels with the PF then do a reset. During * reset we'll realloc queues and fix the RSS table. Returns 0 on success, * negative on failure. **/ static int iavf_set_channels(struct net_device *netdev, struct ethtool_channels *ch) { struct iavf_adapter *adapter = netdev_priv(netdev); u32 num_req = ch->combined_count; int ret = 0; if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && adapter->num_tc) { dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n"); return -EINVAL; } /* All of these should have already been checked by ethtool before this * even gets to us, but just to be sure. */ if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs) return -EINVAL; if (num_req == adapter->num_active_queues) return 0; if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS) return -EINVAL; adapter->num_req_queues = num_req; adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); ret = iavf_wait_for_reset(adapter); if (ret) netdev_warn(netdev, "Changing channel count timeout or interrupted waiting for reset"); return ret; } /** * iavf_get_rxfh_key_size - get the RSS hash key size * @netdev: network interface device structure * * Returns the table size. **/ static u32 iavf_get_rxfh_key_size(struct net_device *netdev) { struct iavf_adapter *adapter = netdev_priv(netdev); return adapter->rss_key_size; } /** * iavf_get_rxfh_indir_size - get the rx flow hash indirection table size * @netdev: network interface device structure * * Returns the table size. **/ static u32 iavf_get_rxfh_indir_size(struct net_device *netdev) { struct iavf_adapter *adapter = netdev_priv(netdev); return adapter->rss_lut_size; } /** * iavf_get_rxfh - get the rx flow hash indirection table * @netdev: network interface device structure * @indir: indirection table * @key: hash key * @hfunc: hash function in use * * Reads the indirection table directly from the hardware. Always returns 0. **/ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { struct iavf_adapter *adapter = netdev_priv(netdev); u16 i; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; if (key) memcpy(key, adapter->rss_key, adapter->rss_key_size); if (indir) /* Each 32 bits pointed by 'indir' is stored with a lut entry */ for (i = 0; i < adapter->rss_lut_size; i++) indir[i] = (u32)adapter->rss_lut[i]; return 0; } /** * iavf_set_rxfh - set the rx flow hash indirection table * @netdev: network interface device structure * @indir: indirection table * @key: hash key * @hfunc: hash function to use * * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. 
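 * From user space this typically corresponds to an "ethtool -X" request
 * (hash key and/or indirection table); each 32-bit @indir entry is narrowed
 * to a u8 before being stored in adapter->rss_lut and applied through
 * iavf_config_rss().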
 **/
static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u16 i;

	/* Only support toeplitz hash function */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!key && !indir)
		return 0;

	if (key)
		memcpy(adapter->rss_key, key, adapter->rss_key_size);

	if (indir) {
		/* Each 32 bits pointed by 'indir' is stored with a lut entry */
		for (i = 0; i < adapter->rss_lut_size; i++)
			adapter->rss_lut[i] = (u8)(indir[i]);
	}

	return iavf_config_rss(adapter);
}

static const struct ethtool_ops iavf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo = iavf_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = iavf_get_ringparam,
	.set_ringparam = iavf_set_ringparam,
	.get_strings = iavf_get_strings,
	.get_ethtool_stats = iavf_get_ethtool_stats,
	.get_sset_count = iavf_get_sset_count,
	.get_priv_flags = iavf_get_priv_flags,
	.set_priv_flags = iavf_set_priv_flags,
	.get_msglevel = iavf_get_msglevel,
	.set_msglevel = iavf_set_msglevel,
	.get_coalesce = iavf_get_coalesce,
	.set_coalesce = iavf_set_coalesce,
	.get_per_queue_coalesce = iavf_get_per_queue_coalesce,
	.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
	.set_rxnfc = iavf_set_rxnfc,
	.get_rxnfc = iavf_get_rxnfc,
	.get_rxfh_indir_size = iavf_get_rxfh_indir_size,
	.get_rxfh = iavf_get_rxfh,
	.set_rxfh = iavf_set_rxfh,
	.get_channels = iavf_get_channels,
	.set_channels = iavf_set_channels,
	.get_rxfh_key_size = iavf_get_rxfh_key_size,
	.get_link_ksettings = iavf_get_link_ksettings,
};

/**
 * iavf_set_ethtool_ops - Initialize ethtool ops struct
 * @netdev: network interface device structure
 *
 * Sets ethtool ops struct in our netdev so that ethtool can call
 * our functions.
 **/
void iavf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &iavf_ethtool_ops;
}
linux-master
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include <linux/list.h> #include <linux/errno.h> #include "iavf.h" #include "iavf_prototype.h" #include "iavf_client.h" static const char iavf_client_interface_version_str[] = IAVF_CLIENT_VERSION_STR; static struct iavf_client *vf_registered_client; static LIST_HEAD(iavf_devices); static DEFINE_MUTEX(iavf_device_mutex); static u32 iavf_client_virtchnl_send(struct iavf_info *ldev, struct iavf_client *client, u8 *msg, u16 len); static int iavf_client_setup_qvlist(struct iavf_info *ldev, struct iavf_client *client, struct iavf_qvlist_info *qvlist_info); static struct iavf_ops iavf_lan_ops = { .virtchnl_send = iavf_client_virtchnl_send, .setup_qvlist = iavf_client_setup_qvlist, }; /** * iavf_client_get_params - retrieve relevant client parameters * @vsi: VSI with parameters * @params: client param struct **/ static void iavf_client_get_params(struct iavf_vsi *vsi, struct iavf_params *params) { int i; memset(params, 0, sizeof(struct iavf_params)); params->mtu = vsi->netdev->mtu; params->link_up = vsi->back->link_up; for (i = 0; i < IAVF_MAX_USER_PRIORITY; i++) { params->qos.prio_qos[i].tc = 0; params->qos.prio_qos[i].qs_handle = vsi->qs_handle; } } /** * iavf_notify_client_message - call the client message receive callback * @vsi: the VSI associated with this client * @msg: message buffer * @len: length of message * * If there is a client to this VSI, call the client **/ void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len) { struct iavf_client_instance *cinst; if (!vsi) return; cinst = vsi->back->cinst; if (!cinst || !cinst->client || !cinst->client->ops || !cinst->client->ops->virtchnl_receive) { dev_dbg(&vsi->back->pdev->dev, "Cannot locate client instance virtchnl_receive function\n"); return; } cinst->client->ops->virtchnl_receive(&cinst->lan_info, cinst->client, msg, len); } /** * iavf_notify_client_l2_params - call the client notify callback * @vsi: the VSI with l2 param changes * * If there is a client to this VSI, call the client **/ void iavf_notify_client_l2_params(struct iavf_vsi *vsi) { struct iavf_client_instance *cinst; struct iavf_params params; if (!vsi) return; cinst = vsi->back->cinst; if (!cinst || !cinst->client || !cinst->client->ops || !cinst->client->ops->l2_param_change) { dev_dbg(&vsi->back->pdev->dev, "Cannot locate client instance l2_param_change function\n"); return; } iavf_client_get_params(vsi, &params); cinst->lan_info.params = params; cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client, &params); } /** * iavf_notify_client_open - call the client open callback * @vsi: the VSI with netdev opened * * If there is a client to this netdev, call the client with open **/ void iavf_notify_client_open(struct iavf_vsi *vsi) { struct iavf_adapter *adapter = vsi->back; struct iavf_client_instance *cinst = adapter->cinst; int ret; if (!cinst || !cinst->client || !cinst->client->ops || !cinst->client->ops->open) { dev_dbg(&vsi->back->pdev->dev, "Cannot locate client instance open function\n"); return; } if (!(test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state))) { ret = cinst->client->ops->open(&cinst->lan_info, cinst->client); if (!ret) set_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state); } } /** * iavf_client_release_qvlist - send a message to the PF to release rdma qv map * @ldev: pointer to L2 context. 
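 * Sends VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP to the PF; if other admin queue
 * operations are still pending (adapter->aq_required is non-zero), the call
 * bails out early with -EAGAIN instead of queueing the message.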
* * Return 0 on success or < 0 on error **/ static int iavf_client_release_qvlist(struct iavf_info *ldev) { struct iavf_adapter *adapter = ldev->vf; enum iavf_status err; if (adapter->aq_required) return -EAGAIN; err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP, IAVF_SUCCESS, NULL, 0, NULL); if (err) dev_err(&adapter->pdev->dev, "Unable to send RDMA vector release message to PF, error %d, aq status %d\n", err, adapter->hw.aq.asq_last_status); return err; } /** * iavf_notify_client_close - call the client close callback * @vsi: the VSI with netdev closed * @reset: true when close called due to reset pending * * If there is a client to this netdev, call the client with close **/ void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset) { struct iavf_adapter *adapter = vsi->back; struct iavf_client_instance *cinst = adapter->cinst; if (!cinst || !cinst->client || !cinst->client->ops || !cinst->client->ops->close) { dev_dbg(&vsi->back->pdev->dev, "Cannot locate client instance close function\n"); return; } cinst->client->ops->close(&cinst->lan_info, cinst->client, reset); iavf_client_release_qvlist(&cinst->lan_info); clear_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state); } /** * iavf_client_add_instance - add a client instance to the instance list * @adapter: pointer to the board struct * * Returns cinst ptr on success, NULL on failure **/ static struct iavf_client_instance * iavf_client_add_instance(struct iavf_adapter *adapter) { struct iavf_client_instance *cinst = NULL; struct iavf_vsi *vsi = &adapter->vsi; struct netdev_hw_addr *mac = NULL; struct iavf_params params; if (!vf_registered_client) goto out; if (adapter->cinst) { cinst = adapter->cinst; goto out; } cinst = kzalloc(sizeof(*cinst), GFP_KERNEL); if (!cinst) goto out; cinst->lan_info.vf = (void *)adapter; cinst->lan_info.netdev = vsi->netdev; cinst->lan_info.pcidev = adapter->pdev; cinst->lan_info.fid = 0; cinst->lan_info.ftype = IAVF_CLIENT_FTYPE_VF; cinst->lan_info.hw_addr = adapter->hw.hw_addr; cinst->lan_info.ops = &iavf_lan_ops; cinst->lan_info.version.major = IAVF_CLIENT_VERSION_MAJOR; cinst->lan_info.version.minor = IAVF_CLIENT_VERSION_MINOR; cinst->lan_info.version.build = IAVF_CLIENT_VERSION_BUILD; iavf_client_get_params(vsi, &params); cinst->lan_info.params = params; set_bit(__IAVF_CLIENT_INSTANCE_NONE, &cinst->state); cinst->lan_info.msix_count = adapter->num_rdma_msix; cinst->lan_info.msix_entries = &adapter->msix_entries[adapter->rdma_base_vector]; mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list, struct netdev_hw_addr, list); if (mac) ether_addr_copy(cinst->lan_info.lanmac, mac->addr); else dev_err(&adapter->pdev->dev, "MAC address list is empty!\n"); cinst->client = vf_registered_client; adapter->cinst = cinst; out: return cinst; } /** * iavf_client_del_instance - removes a client instance from the list * @adapter: pointer to the board struct * **/ static void iavf_client_del_instance(struct iavf_adapter *adapter) { kfree(adapter->cinst); adapter->cinst = NULL; } /** * iavf_client_subtask - client maintenance work * @adapter: board private structure **/ void iavf_client_subtask(struct iavf_adapter *adapter) { struct iavf_client *client = vf_registered_client; struct iavf_client_instance *cinst; int ret = 0; if (adapter->state < __IAVF_DOWN) return; /* first check client is registered */ if (!client) return; /* Add the client instance to the instance list */ cinst = iavf_client_add_instance(adapter); if (!cinst) return; dev_info(&adapter->pdev->dev, "Added instance of 
Client %s\n", client->name); if (!test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state)) { /* Send an Open request to the client */ if (client->ops && client->ops->open) ret = client->ops->open(&cinst->lan_info, client); if (!ret) set_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state); else /* remove client instance */ iavf_client_del_instance(adapter); } } /** * iavf_lan_add_device - add a lan device struct to the list of lan devices * @adapter: pointer to the board struct * * Returns 0 on success or none 0 on error **/ int iavf_lan_add_device(struct iavf_adapter *adapter) { struct iavf_device *ldev; int ret = 0; mutex_lock(&iavf_device_mutex); list_for_each_entry(ldev, &iavf_devices, list) { if (ldev->vf == adapter) { ret = -EEXIST; goto out; } } ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); if (!ldev) { ret = -ENOMEM; goto out; } ldev->vf = adapter; INIT_LIST_HEAD(&ldev->list); list_add(&ldev->list, &iavf_devices); dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", adapter->hw.bus.bus_id, adapter->hw.bus.device, adapter->hw.bus.func); /* Since in some cases register may have happened before a device gets * added, we can schedule a subtask to go initiate the clients. */ adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; out: mutex_unlock(&iavf_device_mutex); return ret; } /** * iavf_lan_del_device - removes a lan device from the device list * @adapter: pointer to the board struct * * Returns 0 on success or non-0 on error **/ int iavf_lan_del_device(struct iavf_adapter *adapter) { struct iavf_device *ldev, *tmp; int ret = -ENODEV; mutex_lock(&iavf_device_mutex); list_for_each_entry_safe(ldev, tmp, &iavf_devices, list) { if (ldev->vf == adapter) { dev_info(&adapter->pdev->dev, "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", adapter->hw.bus.bus_id, adapter->hw.bus.device, adapter->hw.bus.func); list_del(&ldev->list); kfree(ldev); ret = 0; break; } } mutex_unlock(&iavf_device_mutex); return ret; } /** * iavf_client_release - release client specific resources * @client: pointer to the registered client * **/ static void iavf_client_release(struct iavf_client *client) { struct iavf_client_instance *cinst; struct iavf_device *ldev; struct iavf_adapter *adapter; mutex_lock(&iavf_device_mutex); list_for_each_entry(ldev, &iavf_devices, list) { adapter = ldev->vf; cinst = adapter->cinst; if (!cinst) continue; if (test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state)) { if (client->ops && client->ops->close) client->ops->close(&cinst->lan_info, client, false); iavf_client_release_qvlist(&cinst->lan_info); clear_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state); dev_warn(&adapter->pdev->dev, "Client %s instance closed\n", client->name); } /* delete the client instance */ iavf_client_del_instance(adapter); dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n", client->name); } mutex_unlock(&iavf_device_mutex); } /** * iavf_client_prepare - prepare client specific resources * @client: pointer to the registered client * **/ static void iavf_client_prepare(struct iavf_client *client) { struct iavf_device *ldev; struct iavf_adapter *adapter; mutex_lock(&iavf_device_mutex); list_for_each_entry(ldev, &iavf_devices, list) { adapter = ldev->vf; /* Signal the watchdog to service the client */ adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; } mutex_unlock(&iavf_device_mutex); } /** * iavf_client_virtchnl_send - send a message to the PF instance * @ldev: pointer to L2 context. * @client: Client pointer. 
* @msg: pointer to message buffer * @len: message length * * Return 0 on success or < 0 on error **/ static u32 iavf_client_virtchnl_send(struct iavf_info *ldev, struct iavf_client *client, u8 *msg, u16 len) { struct iavf_adapter *adapter = ldev->vf; enum iavf_status err; if (adapter->aq_required) return -EAGAIN; err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_RDMA, IAVF_SUCCESS, msg, len, NULL); if (err) dev_err(&adapter->pdev->dev, "Unable to send RDMA message to PF, error %d, aq status %d\n", err, adapter->hw.aq.asq_last_status); return err; } /** * iavf_client_setup_qvlist - send a message to the PF to setup rdma qv map * @ldev: pointer to L2 context. * @client: Client pointer. * @qvlist_info: queue and vector list * * Return 0 on success or < 0 on error **/ static int iavf_client_setup_qvlist(struct iavf_info *ldev, struct iavf_client *client, struct iavf_qvlist_info *qvlist_info) { struct virtchnl_rdma_qvlist_info *v_qvlist_info; struct iavf_adapter *adapter = ldev->vf; struct iavf_qv_info *qv_info; enum iavf_status err; u32 v_idx, i; size_t msg_size; if (adapter->aq_required) return -EAGAIN; /* A quick check on whether the vectors belong to the client */ for (i = 0; i < qvlist_info->num_vectors; i++) { qv_info = &qvlist_info->qv_info[i]; if (!qv_info) continue; v_idx = qv_info->v_idx; if ((v_idx >= (adapter->rdma_base_vector + adapter->num_rdma_msix)) || (v_idx < adapter->rdma_base_vector)) return -EINVAL; } v_qvlist_info = (struct virtchnl_rdma_qvlist_info *)qvlist_info; msg_size = virtchnl_struct_size(v_qvlist_info, qv_info, v_qvlist_info->num_vectors); adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP); err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP, IAVF_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL); if (err) { dev_err(&adapter->pdev->dev, "Unable to send RDMA vector config message to PF, error %d, aq status %d\n", err, adapter->hw.aq.asq_last_status); goto out; } err = -EBUSY; for (i = 0; i < 5; i++) { msleep(100); if (!(adapter->client_pending & BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP))) { err = 0; break; } } out: return err; } /** * iavf_register_client - Register a iavf client driver with the L2 driver * @client: pointer to the iavf_client struct * * Returns 0 on success or non-0 on error **/ int iavf_register_client(struct iavf_client *client) { int ret = 0; if (!client) { ret = -EIO; goto out; } if (strlen(client->name) == 0) { pr_info("iavf: Failed to register client with no name\n"); ret = -EIO; goto out; } if (vf_registered_client) { pr_info("iavf: Client %s has already been registered!\n", client->name); ret = -EEXIST; goto out; } if ((client->version.major != IAVF_CLIENT_VERSION_MAJOR) || (client->version.minor != IAVF_CLIENT_VERSION_MINOR)) { pr_info("iavf: Failed to register client %s due to mismatched client interface version\n", client->name); pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n", client->version.major, client->version.minor, client->version.build, iavf_client_interface_version_str); ret = -EIO; goto out; } vf_registered_client = client; iavf_client_prepare(client); pr_info("iavf: Registered client %s with return code %d\n", client->name, ret); out: return ret; } EXPORT_SYMBOL(iavf_register_client); /** * iavf_unregister_client - Unregister a iavf client driver with the L2 driver * @client: pointer to the iavf_client struct * * Returns 0 on success or non-0 on error **/ int iavf_unregister_client(struct iavf_client *client) { int ret = 0; /* When a unregister request comes 
through we would have to send * a close for each of the client instances that were opened. * client_release function is called to handle this. */ iavf_client_release(client); if (vf_registered_client != client) { pr_info("iavf: Client %s has not been registered\n", client->name); ret = -ENODEV; goto out; } vf_registered_client = NULL; pr_info("iavf: Unregistered client %s\n", client->name); out: return ret; } EXPORT_SYMBOL(iavf_unregister_client);
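/*
 * Illustrative sketch (not part of iavf_client.c): how an out-of-tree RDMA
 * client module might attach to the interface exported above.  The callback
 * signatures mirror the calls made in this file (ops->open(&lan_info, client),
 * ops->close(&lan_info, client, reset)); the struct iavf_client_ops layout,
 * the header include and all "example_*" names are assumptions for
 * illustration only.
 */
#include <linux/module.h>
#include "iavf_client.h"	/* assumed header declaring struct iavf_client */

static int example_rdma_open(struct iavf_info *ldev, struct iavf_client *client)
{
	/* Invoked from iavf_client_subtask() once the instance is added. */
	return 0;
}

static void example_rdma_close(struct iavf_info *ldev,
			       struct iavf_client *client, bool reset)
{
	/* Invoked from iavf_notify_client_close() and iavf_client_release(). */
}

static struct iavf_client_ops example_rdma_ops = {
	.open	= example_rdma_open,
	.close	= example_rdma_close,
};

static struct iavf_client example_rdma_client = {
	.name		= "example_rdma",
	.version	= {
		.major	= IAVF_CLIENT_VERSION_MAJOR,
		.minor	= IAVF_CLIENT_VERSION_MINOR,
		.build	= IAVF_CLIENT_VERSION_BUILD,
	},
	.ops		= &example_rdma_ops,
};

/* Registration is rejected unless major/minor match the LAN driver's version. */
static int __init example_rdma_init(void)
{
	return iavf_register_client(&example_rdma_client);
}

static void __exit example_rdma_exit(void)
{
	iavf_unregister_client(&example_rdma_client);
}

module_init(example_rdma_init);
module_exit(example_rdma_exit);
MODULE_LICENSE("GPL");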
linux-master
drivers/net/ethernet/intel/iavf/iavf_client.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> #include <linux/sched.h> #include "ixgbe.h" #include "ixgbe_phy.h" #define IXGBE_82598_MAX_TX_QUEUES 32 #define IXGBE_82598_MAX_RX_QUEUES 64 #define IXGBE_82598_RAR_ENTRIES 16 #define IXGBE_82598_MC_TBL_SIZE 128 #define IXGBE_82598_VFT_TBL_SIZE 128 #define IXGBE_82598_RX_PB_SIZE 512 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); /** * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout * @hw: pointer to the HW structure * * The defaults for 82598 should be in the range of 50us to 50ms, * however the hardware default for these parts is 500us to 1ms which is less * than the 10ms recommended by the pci-e spec. To address this we need to * increase the value to either 10ms to 250ms for capability version 1 config, * or 16ms to 55ms for version 2. **/ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) { u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); u16 pcie_devctl2; if (ixgbe_removed(hw->hw_addr)) return; /* only take action if timeout value is defaulted to 0 */ if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) goto out; /* * if capababilities version is type 1 we can write the * timeout of 10ms to 250ms through the GCR register */ if (!(gcr & IXGBE_GCR_CAP_VER2)) { gcr |= IXGBE_GCR_CMPL_TMOUT_10ms; goto out; } /* * for version 2 capabilities we need to write the config space * directly in order to set the completion timeout value for * 16ms to 55ms */ pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); out: /* disable completion timeout resend */ gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); } static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; /* Call PHY identify routine to get the phy type */ ixgbe_identify_phy_generic(hw); mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE; mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); return 0; } /** * ixgbe_init_phy_ops_82598 - PHY/SFP specific init * @hw: pointer to hardware structure * * Initialize any function pointers that were not able to be * set during get_invariants because the PHY/SFP type was * not known. Perform the SFP init if necessary. 
* **/ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val; u16 list_offset, data_offset; /* Identify the PHY */ phy->ops.identify(hw); /* Overwrite the link function pointers if copper PHY */ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { mac->ops.setup_link = &ixgbe_setup_copper_link_82598; mac->ops.get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic; } switch (hw->phy.type) { case ixgbe_phy_tn: phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; phy->ops.check_link = &ixgbe_check_phy_link_tnx; break; case ixgbe_phy_nl: phy->ops.reset = &ixgbe_reset_phy_nl; /* Call SFP+ identify routine to get the SFP+ module type */ ret_val = phy->ops.identify_sfp(hw); if (ret_val) return ret_val; if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) return IXGBE_ERR_SFP_NOT_SUPPORTED; /* Check to see if SFP+ module is supported */ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, &data_offset); if (ret_val) return IXGBE_ERR_SFP_NOT_SUPPORTED; break; default: break; } return 0; } /** * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure * * Starts the hardware using the generic start_hw function. * Disables relaxed ordering for archs other than SPARC * Then set pcie completion timeout * **/ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) { s32 ret_val; ret_val = ixgbe_start_hw_generic(hw); if (ret_val) return ret_val; /* set the completion timeout for interface */ ixgbe_set_pcie_completion_timeout(hw); return 0; } /** * ixgbe_get_link_capabilities_82598 - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed * @autoneg: boolean auto-negotiation value * * Determines the link capabilities by reading the AUTOC register. **/ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { u32 autoc = 0; /* * Determine link capabilities based on the stored value of AUTOC, * which represents EEPROM defaults. If AUTOC value has not been * stored, use the current register value. */ if (hw->mac.orig_link_settings_stored) autoc = hw->mac.orig_autoc; else autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); switch (autoc & IXGBE_AUTOC_LMS_MASK) { case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; *autoneg = false; break; case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_10GB_FULL; *autoneg = false; break; case IXGBE_AUTOC_LMS_1G_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; *autoneg = true; break; case IXGBE_AUTOC_LMS_KX4_AN: case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: *speed = IXGBE_LINK_SPEED_UNKNOWN; if (autoc & IXGBE_AUTOC_KX4_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; *autoneg = true; break; default: return IXGBE_ERR_LINK_SETUP; } return 0; } /** * ixgbe_get_media_type_82598 - Determines media type * @hw: pointer to hardware structure * * Returns the media type (fiber, copper, backplane) **/ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) { /* Detect if there is a copper PHY attached. 
*/ switch (hw->phy.type) { case ixgbe_phy_cu_unknown: case ixgbe_phy_tn: return ixgbe_media_type_copper; default: break; } /* Media type for I82598 is based on device ID */ switch (hw->device_id) { case IXGBE_DEV_ID_82598: case IXGBE_DEV_ID_82598_BX: /* Default device ID is mezzanine card KX/KX4 */ return ixgbe_media_type_backplane; case IXGBE_DEV_ID_82598AF_DUAL_PORT: case IXGBE_DEV_ID_82598AF_SINGLE_PORT: case IXGBE_DEV_ID_82598_DA_DUAL_PORT: case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: case IXGBE_DEV_ID_82598EB_XF_LR: case IXGBE_DEV_ID_82598EB_SFP_LOM: return ixgbe_media_type_fiber; case IXGBE_DEV_ID_82598EB_CX4: case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: return ixgbe_media_type_cx4; case IXGBE_DEV_ID_82598AT: case IXGBE_DEV_ID_82598AT2: return ixgbe_media_type_copper; default: return ixgbe_media_type_unknown; } } /** * ixgbe_fc_enable_82598 - Enable flow control * @hw: pointer to hardware structure * * Enable flow control according to the current settings. **/ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) { u32 fctrl_reg; u32 rmcs_reg; u32 reg; u32 fcrtl, fcrth; u32 link_speed = 0; int i; bool link_up; /* Validate the water mark configuration */ if (!hw->fc.pause_time) return IXGBE_ERR_INVALID_LINK_SETTINGS; /* Low water mark of zero causes XOFF floods */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && hw->fc.high_water[i]) { if (!hw->fc.low_water[i] || hw->fc.low_water[i] >= hw->fc.high_water[i]) { hw_dbg(hw, "Invalid water mark configuration\n"); return IXGBE_ERR_INVALID_LINK_SETTINGS; } } } /* * On 82598 having Rx FC on causes resets while doing 1G * so if it's on turn it off once we know link_speed. For * more details see 82598 Specification update. */ hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { switch (hw->fc.requested_mode) { case ixgbe_fc_full: hw->fc.requested_mode = ixgbe_fc_tx_pause; break; case ixgbe_fc_rx_pause: hw->fc.requested_mode = ixgbe_fc_none; break; default: /* no change */ break; } } /* Negotiate the fc mode to use */ hw->mac.ops.fc_autoneg(hw); /* Disable any previous flow control settings */ fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); /* * The possible values of fc.current_mode are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames, * but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames but * we do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. * other: Invalid. */ switch (hw->fc.current_mode) { case ixgbe_fc_none: /* * Flow control is disabled by software override or autoneg. * The code below will actually disable it in the HW. */ break; case ixgbe_fc_rx_pause: /* * Rx Flow control is enabled and Tx Flow control is * disabled by software override. Since there really * isn't a way to advertise that we are capable of RX * Pause ONLY, we will advertise that we support both * symmetric and asymmetric Rx PAUSE. Later, we will * disable the adapter's ability to send PAUSE frames. */ fctrl_reg |= IXGBE_FCTRL_RFCE; break; case ixgbe_fc_tx_pause: /* * Tx Flow control is enabled, and Rx Flow control is * disabled by software override. */ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; break; case ixgbe_fc_full: /* Flow control (both Rx and Tx) is enabled by SW override. 
*/ fctrl_reg |= IXGBE_FCTRL_RFCE; rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; break; default: hw_dbg(hw, "Flow control param set incorrectly\n"); return IXGBE_ERR_CONFIG; } /* Set 802.3x based flow control settings. */ fctrl_reg |= IXGBE_FCTRL_DPF; IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); /* Set up and enable Rx high/low water mark thresholds, enable XON. */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && hw->fc.high_water[i]) { fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth); } else { IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); } } /* Configure pause time (2 TCs per register) */ reg = hw->fc.pause_time * 0x00010001; for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); /* Configure flow control refresh threshold value */ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); return 0; } /** * ixgbe_start_mac_link_82598 - Configures MAC link settings * @hw: pointer to hardware structure * @autoneg_wait_to_complete: true when waiting for completion is needed * * Configures link settings based on values in the ixgbe_hw struct. * Restarts the link. Performs autonegotiation if needed. **/ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, bool autoneg_wait_to_complete) { u32 autoc_reg; u32 links_reg; u32 i; s32 status = 0; /* Restart link */ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); autoc_reg |= IXGBE_AUTOC_AN_RESTART; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); /* Only poll for autoneg to complete if specified to do so */ if (autoneg_wait_to_complete) { if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_KX4_AN || (autoc_reg & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { links_reg = 0; /* Just in case Autoneg time = 0 */ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_reg & IXGBE_LINKS_KX_AN_COMP) break; msleep(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; hw_dbg(hw, "Autonegotiation did not complete.\n"); } } } /* Add delay to filter out noises during initial link setup */ msleep(50); return status; } /** * ixgbe_validate_link_ready - Function looks for phy link * @hw: pointer to hardware structure * * Function indicates success when phy link is available. If phy is not ready * within 5 seconds of MAC indicating link, the function returns error. 
**/ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) { u32 timeout; u16 an_reg; if (hw->device_id != IXGBE_DEV_ID_82598AT2) return 0; for (timeout = 0; timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg); if ((an_reg & MDIO_AN_STAT1_COMPLETE) && (an_reg & MDIO_STAT1_LSTATUS)) break; msleep(100); } if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { hw_dbg(hw, "Link was indicated but link is down\n"); return IXGBE_ERR_LINK_SETUP; } return 0; } /** * ixgbe_check_mac_link_82598 - Get link/speed status * @hw: pointer to hardware structure * @speed: pointer to link speed * @link_up: true is link is up, false otherwise * @link_up_wait_to_complete: bool used to wait for link up or not * * Reads the links register to determine if link is up and the current speed **/ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete) { u32 links_reg; u32 i; u16 link_reg, adapt_comp_reg; /* * SERDES PHY requires us to read link status from register 0xC79F. * Bit 0 set indicates link is up/ready; clear indicates link down. * 0xC00C is read to check that the XAUI lanes are active. Bit 0 * clear indicates active; set indicates inactive. */ if (hw->phy.type == ixgbe_phy_nl) { hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD, &adapt_comp_reg); if (link_up_wait_to_complete) { for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) { *link_up = true; break; } else { *link_up = false; } msleep(100); hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD, &adapt_comp_reg); } } else { if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) *link_up = true; else *link_up = false; } if (!*link_up) return 0; } links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (link_up_wait_to_complete) { for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { if (links_reg & IXGBE_LINKS_UP) { *link_up = true; break; } else { *link_up = false; } msleep(100); links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); } } else { if (links_reg & IXGBE_LINKS_UP) *link_up = true; else *link_up = false; } if (links_reg & IXGBE_LINKS_SPEED) *speed = IXGBE_LINK_SPEED_10GB_FULL; else *speed = IXGBE_LINK_SPEED_1GB_FULL; if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up && (ixgbe_validate_link_ready(hw) != 0)) *link_up = false; return 0; } /** * ixgbe_setup_mac_link_82598 - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the AUTOC register and restarts link. **/ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { bool autoneg = false; ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 autoc = curr_autoc; u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; /* Check to see if speed passed in is supported. 
*/ ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg); speed &= link_capabilities; if (speed == IXGBE_LINK_SPEED_UNKNOWN) return IXGBE_ERR_LINK_SETUP; /* Set KX4/KX support according to speed requested */ else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; if (speed & IXGBE_LINK_SPEED_10GB_FULL) autoc |= IXGBE_AUTOC_KX4_SUPP; if (speed & IXGBE_LINK_SPEED_1GB_FULL) autoc |= IXGBE_AUTOC_KX_SUPP; if (autoc != curr_autoc) IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); } /* Setup and restart the link based on the new values in * ixgbe_hw This will write the AUTOC register based on the new * stored values */ return ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); } /** * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field * @hw: pointer to hardware structure * @speed: new link speed * @autoneg_wait_to_complete: true if waiting is needed to complete * * Sets the link speed in the AUTOC register in the MAC and restarts link. **/ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { s32 status; /* Setup the PHY according to input speed */ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); /* Set up MAC */ ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); return status; } /** * ixgbe_reset_hw_82598 - Performs hardware reset * @hw: pointer to hardware structure * * Resets the hardware by resetting the transmit and receive units, masks and * clears all interrupts, performing a PHY reset, and performing a link (MAC) * reset. **/ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) { s32 status; s32 phy_status = 0; u32 ctrl; u32 gheccr; u32 i; u32 autoc; u8 analog_val; /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); if (status) return status; /* * Power up the Atlas Tx lanes if they are currently powered down. * Atlas Tx lanes are powered down for MAC loopback tests, but * they are not automatically restored on reset. */ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { /* Enable Tx Atlas so packets can be transmitted again */ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val); hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val); hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val); hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val); } /* Reset PHY */ if (hw->phy.reset_disable == false) { /* PHY ops must be identified and initialized prior to reset */ /* Init PHY and function pointers, perform SFP setup */ phy_status = hw->phy.ops.init(hw); if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) return phy_status; if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) goto mac_reset_top; hw->phy.ops.reset(hw); } mac_reset_top: /* * Issue global reset to the MAC. This needs to be a SW reset. 
* If link reset is used, it might reset the MAC when mng is using it */ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST; IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST)) break; udelay(1); } if (ctrl & IXGBE_CTRL_RST) { status = IXGBE_ERR_RESET_FAILED; hw_dbg(hw, "Reset polling failed to complete.\n"); } msleep(50); /* * Double resets are required for recovery from certain error * conditions. Between resets, it is necessary to stall to allow time * for any pending HW events to complete. */ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; goto mac_reset_top; } gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6)); IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); /* * Store the original AUTOC value if it has not been * stored off yet. Otherwise restore the stored original * AUTOC value since the reset operation sets back to deaults. */ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); if (hw->mac.orig_link_settings_stored == false) { hw->mac.orig_autoc = autoc; hw->mac.orig_link_settings_stored = true; } else if (autoc != hw->mac.orig_autoc) { IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); } /* Store the permanent mac address */ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); /* * Store MAC address from RAR0, clear receive address registers, and * clear the multicast table */ hw->mac.ops.init_rx_addrs(hw); if (phy_status) status = phy_status; return status; } /** * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address * @hw: pointer to hardware struct * @rar: receive address register index to associate with a VMDq index * @vmdq: VMDq set index **/ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { hw_dbg(hw, "RAR index %d is out of range.\n", rar); return IXGBE_ERR_INVALID_ARGUMENT; } rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); rar_high &= ~IXGBE_RAH_VIND_MASK; rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); return 0; } /** * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address * @hw: pointer to hardware struct * @rar: receive address register index to associate with a VMDq index * @vmdq: VMDq clear index (not used in 82598, but elsewhere) **/ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { hw_dbg(hw, "RAR index %d is out of range.\n", rar); return IXGBE_ERR_INVALID_ARGUMENT; } rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); if (rar_high & IXGBE_RAH_VIND_MASK) { rar_high &= ~IXGBE_RAH_VIND_MASK; IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); } return 0; } /** * ixgbe_set_vfta_82598 - Set VLAN filter table * @hw: pointer to hardware structure * @vlan: VLAN id to write to VLAN filter * @vind: VMDq output index that maps queue to VLAN id in VFTA * @vlan_on: boolean flag to turn on/off VLAN in VFTA * @vlvf_bypass: boolean flag - unused * * Turn on/off specified VLAN in the VLAN filter table. 
**/ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, bool vlvf_bypass) { u32 regindex; u32 bitindex; u32 bits; u32 vftabyte; if (vlan > 4095) return IXGBE_ERR_PARAM; /* Determine 32-bit word position in array */ regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ /* Determine the location of the (VMD) queue index */ vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ /* Set the nibble for VMD queue index */ bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); bits &= (~(0x0F << bitindex)); bits |= (vind << bitindex); IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); /* Determine the location of the bit for this VLAN id */ bitindex = vlan & 0x1F; /* lower five bits */ bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); if (vlan_on) /* Turn on this VLAN id */ bits |= BIT(bitindex); else /* Turn off this VLAN id */ bits &= ~BIT(bitindex); IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); return 0; } /** * ixgbe_clear_vfta_82598 - Clear VLAN filter table * @hw: pointer to hardware structure * * Clears the VLAN filter table, and the VMDq index associated with the filter **/ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) { u32 offset; u32 vlanbyte; for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), 0); return 0; } /** * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register * @hw: pointer to hardware structure * @reg: analog register to read * @val: read value * * Performs read operation to Atlas analog register specified. **/ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) { u32 atlas_ctl; IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); IXGBE_WRITE_FLUSH(hw); udelay(10); atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); *val = (u8)atlas_ctl; return 0; } /** * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register * @hw: pointer to hardware structure * @reg: atlas register to write * @val: value to write * * Performs write operation to Atlas analog register specified. **/ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) { u32 atlas_ctl; atlas_ctl = (reg << 8) | val; IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); IXGBE_WRITE_FLUSH(hw); udelay(10); return 0; } /** * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface. * @hw: pointer to hardware structure * @dev_addr: address to read from * @byte_offset: byte offset to read from dev_addr * @eeprom_data: value read * * Performs 8 byte read operation to SFP module's data over I2C interface. **/ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, u8 byte_offset, u8 *eeprom_data) { s32 status = 0; u16 sfp_addr = 0; u16 sfp_data = 0; u16 sfp_stat = 0; u16 gssr; u32 i; if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) gssr = IXGBE_GSSR_PHY1_SM; else gssr = IXGBE_GSSR_PHY0_SM; if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) return IXGBE_ERR_SWFW_SYNC; if (hw->phy.type == ixgbe_phy_nl) { /* * phy SDA/SCL registers are at addresses 0xC30A to * 0xC30D. These registers are used to talk to the SFP+ * module's EEPROM through the SDA/SCL (I2C) interface. 
*/ sfp_addr = (dev_addr << 8) + byte_offset; sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); hw->phy.ops.write_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, MDIO_MMD_PMAPMD, sfp_addr); /* Poll status */ for (i = 0; i < 100; i++) { hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, MDIO_MMD_PMAPMD, &sfp_stat); sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) break; usleep_range(10000, 20000); } if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { hw_dbg(hw, "EEPROM read did not pass.\n"); status = IXGBE_ERR_SFP_NOT_PRESENT; goto out; } /* Read data */ hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, MDIO_MMD_PMAPMD, &sfp_data); *eeprom_data = (u8)(sfp_data >> 8); } else { status = IXGBE_ERR_PHY; } out: hw->mac.ops.release_swfw_sync(hw, gssr); return status; } /** * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. * @hw: pointer to hardware structure * @byte_offset: EEPROM byte offset to read * @eeprom_data: value read * * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. **/ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) { return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR, byte_offset, eeprom_data); } /** * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface. * @hw: pointer to hardware structure * @byte_offset: byte offset at address 0xA2 * @sff8472_data: value read * * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C **/ static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, u8 *sff8472_data) { return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2, byte_offset, sff8472_data); } /** * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple * port devices. * @hw: pointer to the HW structure * * Calls common function and corrects issue with some single port devices * that enable LAN1 but not LAN0. 
**/ static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) { struct ixgbe_bus_info *bus = &hw->bus; u16 pci_gen = 0; u16 pci_ctrl2 = 0; ixgbe_set_lan_id_multi_port_pcie(hw); /* check if LAN0 is disabled */ hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen); if ((pci_gen != 0) && (pci_gen != 0xFFFF)) { hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2); /* if LAN0 is completely disabled force function to 0 */ if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) && !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) && !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) { bus->func = 0; } } } /** * ixgbe_set_rxpba_82598 - Initialize RX packet buffer * @hw: pointer to hardware structure * @num_pb: number of packet buffers to allocate * @headroom: reserve n KB of headroom * @strategy: packet buffer allocation strategy **/ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom, int strategy) { u32 rxpktsize = IXGBE_RXPBSIZE_64KB; u8 i = 0; if (!num_pb) return; /* Setup Rx packet buffer sizes */ switch (strategy) { case PBA_STRATEGY_WEIGHTED: /* Setup the first four at 80KB */ rxpktsize = IXGBE_RXPBSIZE_80KB; for (; i < 4; i++) IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); /* Setup the last four at 48KB...don't re-init i */ rxpktsize = IXGBE_RXPBSIZE_48KB; fallthrough; case PBA_STRATEGY_EQUAL: default: /* Divide the remaining Rx packet buffer evenly among the TCs */ for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); break; } /* Setup Tx packet buffer sizes */ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); } static const struct ixgbe_mac_operations mac_ops_82598 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82598, .start_hw = &ixgbe_start_hw_82598, .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, .get_media_type = &ixgbe_get_media_type_82598, .enable_rx_dma = &ixgbe_enable_rx_dma_generic, .get_mac_addr = &ixgbe_get_mac_addr_generic, .stop_adapter = &ixgbe_stop_adapter_generic, .get_bus_info = &ixgbe_get_bus_info_generic, .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598, .read_analog_reg8 = &ixgbe_read_analog_reg8_82598, .write_analog_reg8 = &ixgbe_write_analog_reg8_82598, .setup_link = &ixgbe_setup_mac_link_82598, .set_rxpba = &ixgbe_set_rxpba_82598, .check_link = &ixgbe_check_mac_link_82598, .get_link_capabilities = &ixgbe_get_link_capabilities_82598, .led_on = &ixgbe_led_on_generic, .led_off = &ixgbe_led_off_generic, .init_led_link_act = ixgbe_init_led_link_act_generic, .blink_led_start = &ixgbe_blink_led_start_generic, .blink_led_stop = &ixgbe_blink_led_stop_generic, .set_rar = &ixgbe_set_rar_generic, .clear_rar = &ixgbe_clear_rar_generic, .set_vmdq = &ixgbe_set_vmdq_82598, .clear_vmdq = &ixgbe_clear_vmdq_82598, .init_rx_addrs = &ixgbe_init_rx_addrs_generic, .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, .enable_mc = &ixgbe_enable_mc_generic, .disable_mc = &ixgbe_disable_mc_generic, .clear_vfta = &ixgbe_clear_vfta_82598, .set_vfta = &ixgbe_set_vfta_82598, .fc_enable = &ixgbe_fc_enable_82598, .setup_fc = ixgbe_setup_fc_generic, .fc_autoneg = ixgbe_fc_autoneg, .set_fw_drv_ver = NULL, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, .release_swfw_sync = &ixgbe_release_swfw_sync, .init_swfw_sync = NULL, .get_thermal_sensor_data = NULL, .init_thermal_sensor_thresh = NULL, .prot_autoc_read = &prot_autoc_read_generic, .prot_autoc_write = &prot_autoc_write_generic, .enable_rx = &ixgbe_enable_rx_generic, .disable_rx 
= &ixgbe_disable_rx_generic, }; static const struct ixgbe_eeprom_operations eeprom_ops_82598 = { .init_params = &ixgbe_init_eeprom_params_generic, .read = &ixgbe_read_eerd_generic, .write = &ixgbe_write_eeprom_generic, .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic, .read_buffer = &ixgbe_read_eerd_buffer_generic, .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, .update_checksum = &ixgbe_update_eeprom_checksum_generic, }; static const struct ixgbe_phy_operations phy_ops_82598 = { .identify = &ixgbe_identify_phy_generic, .identify_sfp = &ixgbe_identify_module_generic, .init = &ixgbe_init_phy_ops_82598, .reset = &ixgbe_reset_phy_generic, .read_reg = &ixgbe_read_phy_reg_generic, .write_reg = &ixgbe_write_phy_reg_generic, .read_reg_mdi = &ixgbe_read_phy_reg_mdi, .write_reg_mdi = &ixgbe_write_phy_reg_mdi, .setup_link = &ixgbe_setup_phy_link_generic, .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598, .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, .check_overtemp = &ixgbe_tn_check_overtemp, }; const struct ixgbe_info ixgbe_82598_info = { .mac = ixgbe_mac_82598EB, .get_invariants = &ixgbe_get_invariants_82598, .mac_ops = &mac_ops_82598, .eeprom_ops = &eeprom_ops_82598, .phy_ops = &phy_ops_82598, .mvals = ixgbe_mvals_8259X, };
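/*
 * Worked example (illustration only, not driver code) of the index math used
 * by ixgbe_set_vfta_82598() above: each 32-bit VFTA register covers 32 VLAN
 * IDs, and the 4-bit VMDq index for a VLAN lives in one of four VFTAVIND
 * byte arrays.  The helper name below is hypothetical.
 */
static inline void ixgbe_82598_vfta_index_sketch(u32 vlan)
{
	u32 regindex  = (vlan >> 5) & 0x7F; /* which 32-bit VFTA register    */
	u32 bitindex  = vlan & 0x1F;        /* bit within that register      */
	u32 vftabyte  = (vlan >> 3) & 0x03; /* which VFTAVIND byte array     */
	u32 nibbleidx = (vlan & 0x7) << 2;  /* shift of the 4-bit VMDq field */

	/* e.g. vlan 100 -> regindex 3, bitindex 4, vftabyte 0, nibbleidx 16 */
	(void)regindex; (void)bitindex; (void)vftabyte; (void)nibbleidx;
}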
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "ixgbe.h" #include "ixgbe_type.h" #include "ixgbe_dcb.h" #include "ixgbe_dcb_82598.h" /** * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter * @hw: pointer to hardware structure * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @prio_type: priority type indexed by traffic class * * Configure Rx Data Arbiter and credits for each traffic class. */ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *prio_type) { u32 reg = 0; u32 credit_refill = 0; u32 credit_max = 0; u8 i = 0; reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA; IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg); reg = IXGBE_READ_REG(hw, IXGBE_RMCS); /* Enable Arbiter */ reg &= ~IXGBE_RMCS_ARBDIS; /* Enable Receive Recycle within the BWG */ reg |= IXGBE_RMCS_RRM; /* Enable Deficit Fixed Priority arbitration*/ reg |= IXGBE_RMCS_DFP; IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); /* Configure traffic class credits and priority */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { credit_refill = refill[i]; credit_max = max[i]; reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); if (prio_type[i] == prio_link) reg |= IXGBE_RT2CR_LSP; IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); } reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); reg |= IXGBE_RDRXCTL_RDMTS_1_2; reg |= IXGBE_RDRXCTL_MPBEN; reg |= IXGBE_RDRXCTL_MCEN; IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL); /* Make sure there is enough descriptors before arbitration */ reg &= ~IXGBE_RXCTRL_DMBYPS; IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg); return 0; } /** * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter * @hw: pointer to hardware structure * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @prio_type: priority type indexed by traffic class * * Configure Tx Descriptor Arbiter and credits for each traffic class. */ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type) { u32 reg, max_credits; u8 i; reg = IXGBE_READ_REG(hw, IXGBE_DPMCS); /* Enable arbiter */ reg &= ~IXGBE_DPMCS_ARBDIS; reg |= IXGBE_DPMCS_TSOEF; /* Configure Max TSO packet size 34KB including payload and headers */ reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg); /* Configure traffic class credits and priority */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { max_credits = max[i]; reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; reg |= refill[i]; reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT; if (prio_type[i] == prio_group) reg |= IXGBE_TDTQ2TCCR_GSP; if (prio_type[i] == prio_link) reg |= IXGBE_TDTQ2TCCR_LSP; IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); } return 0; } /** * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter * @hw: pointer to hardware structure * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @prio_type: priority type indexed by traffic class * * Configure Tx Data Arbiter and credits for each traffic class. 
*/ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type) { u32 reg; u8 i; reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* Enable Data Plane Arbiter */ reg &= ~IXGBE_PDPMCS_ARBDIS; /* Enable DFP and Transmit Recycle Mode */ reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM); IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg); /* Configure traffic class credits and priority */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { reg = refill[i]; reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT; reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT; if (prio_type[i] == prio_group) reg |= IXGBE_TDPT2TCCR_GSP; if (prio_type[i] == prio_link) reg |= IXGBE_TDPT2TCCR_LSP; IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); } /* Enable Tx packet buffer division */ reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL); reg |= IXGBE_DTXCTL_ENDBUBD; IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg); return 0; } /** * ixgbe_dcb_config_pfc_82598 - Config priority flow control * @hw: pointer to hardware structure * @pfc_en: enabled pfc bitmask * * Configure Priority Flow Control for each traffic class. */ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) { u32 fcrtl, reg; u8 i; /* Enable Transmit Priority Flow Control */ reg = IXGBE_READ_REG(hw, IXGBE_RMCS); reg &= ~IXGBE_RMCS_TFCE_802_3X; reg |= IXGBE_RMCS_TFCE_PRIORITY; IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); /* Enable Receive Priority Flow Control */ reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE); if (pfc_en) reg |= IXGBE_FCTRL_RPFCE; IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); /* Configure PFC Tx thresholds per TC */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { if (!(pfc_en & BIT(i))) { IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); continue; } fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); } /* Configure pause time */ reg = hw->fc.pause_time * 0x00010001; for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); /* Configure flow control refresh threshold value */ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); return 0; } /** * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics * @hw: pointer to hardware structure * * Configure queue statistics registers, all queues belonging to same traffic * class uses a single set of queue statistics counters. */ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) { u32 reg = 0; u8 i = 0; u8 j = 0; /* Receive Queues stats setting - 8 queues per statistics reg */ for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) { reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i)); reg |= ((0x1010101) * j); IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1)); reg |= ((0x1010101) * j); IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg); } /* Transmit Queues stats setting - 4 queues per statistics reg */ for (i = 0; i < 8; i++) { reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i)); reg |= ((0x1010101) * i); IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg); } return 0; } /** * ixgbe_dcb_hw_config_82598 - Config and enable DCB * @hw: pointer to hardware structure * @pfc_en: enabled pfc bitmask * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @prio_type: priority type indexed by traffic class * * Configure dcb settings and enable dcb mode. 
*/ s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type) { ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type); ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, prio_type); ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, prio_type); ixgbe_dcb_config_pfc_82598(hw, pfc_en); ixgbe_dcb_config_tc_stats_82598(hw); return 0; }
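/*
 * Illustrative sketch (not part of the driver): how a caller could populate
 * the per-traffic-class arrays consumed by ixgbe_dcb_hw_config_82598().
 * The credit values and the function name are placeholders; real callers
 * typically derive the refill/max credits from the configured per-TC
 * bandwidth shares rather than using fixed numbers.
 */
static void ixgbe_dcb_82598_config_sketch(struct ixgbe_hw *hw)
{
	u16 refill[MAX_TRAFFIC_CLASS];
	u16 max[MAX_TRAFFIC_CLASS];
	u8 bwg_id[MAX_TRAFFIC_CLASS];
	u8 prio_type[MAX_TRAFFIC_CLASS];
	u8 pfc_en = 0xFF;	/* enable PFC for all eight priorities */
	u8 i;

	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		refill[i] = 64;		/* placeholder refill credits   */
		max[i] = 512;		/* placeholder max credit limit */
		bwg_id[i] = 0;		/* put every TC in one BW group */
		prio_type[i] = prio_group;
	}

	ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max, bwg_id, prio_type);
}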
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "ixgbe_x540.h" #include "ixgbe_type.h" #include "ixgbe_common.h" #include "ixgbe_phy.h" static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed); static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *); static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *); static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *); static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *); static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; struct ixgbe_link_info *link = &hw->link; /* Start with X540 invariants, since so simular */ ixgbe_get_invariants_X540(hw); if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) phy->ops.set_phy_power = NULL; link->addr = IXGBE_CS4227; return 0; } static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw) { struct ixgbe_phy_info *phy = &hw->phy; /* Start with X540 invariants, since so similar */ ixgbe_get_invariants_X540(hw); phy->ops.set_phy_power = NULL; return 0; } static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; /* Start with X540 invariants, since so simular */ ixgbe_get_invariants_X540(hw); if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) phy->ops.set_phy_power = NULL; return 0; } static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw) { struct ixgbe_phy_info *phy = &hw->phy; /* Start with X540 invariants, since so similar */ ixgbe_get_invariants_X540(hw); phy->ops.set_phy_power = NULL; return 0; } /** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control * @hw: pointer to hardware structure **/ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) { u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); if (hw->bus.lan_id) { esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); esdp |= IXGBE_ESDP_SDP1_DIR; } esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_read_cs4227 - Read CS4227 register * @hw: pointer to hardware structure * @reg: register number to write * @value: pointer to receive value read * * Returns status code */ static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) { return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value); } /** * ixgbe_write_cs4227 - Write CS4227 register * @hw: pointer to hardware structure * @reg: register number to write * @value: value to write to register * * Returns status code */ static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) { return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value); } /** * ixgbe_read_pe - Read register from port expander * @hw: pointer to hardware structure * @reg: register number to read * @value: pointer to receive read value * * Returns status code */ static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) { s32 status; status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value); if (status) hw_err(hw, "port expander access failed with %d\n", status); return status; } /** * ixgbe_write_pe - Write register to port expander * @hw: pointer to hardware structure * @reg: register number to write * @value: value to write * * Returns status code */ static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) { s32 status; status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value); if (status) 
hw_err(hw, "port expander access failed with %d\n", status); return status; } /** * ixgbe_reset_cs4227 - Reset CS4227 using port expander * @hw: pointer to hardware structure * * This function assumes that the caller has acquired the proper semaphore. * Returns error code */ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) { s32 status; u32 retry; u16 value; u8 reg; /* Trigger hard reset. */ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg); if (status) return status; reg |= IXGBE_PE_BIT1; status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); if (status) return status; status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg); if (status) return status; reg &= ~IXGBE_PE_BIT1; status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg); if (status) return status; status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg); if (status) return status; reg &= ~IXGBE_PE_BIT1; status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); if (status) return status; usleep_range(IXGBE_CS4227_RESET_HOLD, IXGBE_CS4227_RESET_HOLD + 100); status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg); if (status) return status; reg |= IXGBE_PE_BIT1; status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); if (status) return status; /* Wait for the reset to complete. */ msleep(IXGBE_CS4227_RESET_DELAY); for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS, &value); if (!status && value == IXGBE_CS4227_EEPROM_LOAD_OK) break; msleep(IXGBE_CS4227_CHECK_DELAY); } if (retry == IXGBE_CS4227_RETRIES) { hw_err(hw, "CS4227 reset did not complete\n"); return IXGBE_ERR_PHY; } status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value); if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) { hw_err(hw, "CS4227 EEPROM did not load successfully\n"); return IXGBE_ERR_PHY; } return 0; } /** * ixgbe_check_cs4227 - Check CS4227 and reset as needed * @hw: pointer to hardware structure */ static void ixgbe_check_cs4227(struct ixgbe_hw *hw) { u32 swfw_mask = hw->phy.phy_semaphore_mask; s32 status; u16 value; u8 retry; for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); if (status) { hw_err(hw, "semaphore failed with %d\n", status); msleep(IXGBE_CS4227_CHECK_DELAY); continue; } /* Get status of reset flow. */ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); if (!status && value == IXGBE_CS4227_RESET_COMPLETE) goto out; if (status || value != IXGBE_CS4227_RESET_PENDING) break; /* Reset is pending. Wait and check again. */ hw->mac.ops.release_swfw_sync(hw, swfw_mask); msleep(IXGBE_CS4227_CHECK_DELAY); } /* If still pending, assume other instance failed. */ if (retry == IXGBE_CS4227_RETRIES) { status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); if (status) { hw_err(hw, "semaphore failed with %d\n", status); return; } } /* Reset the CS4227. */ status = ixgbe_reset_cs4227(hw); if (status) { hw_err(hw, "CS4227 reset failed: %d", status); goto out; } /* Reset takes so long, temporarily release semaphore in case the * other driver instance is waiting for the reset indication. */ ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, IXGBE_CS4227_RESET_PENDING); hw->mac.ops.release_swfw_sync(hw, swfw_mask); usleep_range(10000, 12000); status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); if (status) { hw_err(hw, "semaphore failed with %d", status); return; } /* Record completion for next time. 
*/ status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, IXGBE_CS4227_RESET_COMPLETE); out: hw->mac.ops.release_swfw_sync(hw, swfw_mask); msleep(hw->eeprom.semaphore_delay); } /** ixgbe_identify_phy_x550em - Get PHY type based on device id * @hw: pointer to hardware structure * * Returns error code */ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) { switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_SFP: if (hw->bus.lan_id) hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; else hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; return ixgbe_identify_module_generic(hw); case IXGBE_DEV_ID_X550EM_X_SFP: /* set up for CS4227 usage */ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ixgbe_setup_mux_ctl(hw); ixgbe_check_cs4227(hw); fallthrough; case IXGBE_DEV_ID_X550EM_A_SFP_N: return ixgbe_identify_module_generic(hw); case IXGBE_DEV_ID_X550EM_X_KX4: hw->phy.type = ixgbe_phy_x550em_kx4; break; case IXGBE_DEV_ID_X550EM_X_XFI: hw->phy.type = ixgbe_phy_x550em_xfi; break; case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_A_KR: case IXGBE_DEV_ID_X550EM_A_KR_L: hw->phy.type = ixgbe_phy_x550em_kr; break; case IXGBE_DEV_ID_X550EM_A_10G_T: if (hw->bus.lan_id) hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; else hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; fallthrough; case IXGBE_DEV_ID_X550EM_X_10G_T: return ixgbe_identify_phy_generic(hw); case IXGBE_DEV_ID_X550EM_X_1G_T: hw->phy.type = ixgbe_phy_ext_1g_t; break; case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: hw->phy.type = ixgbe_phy_fw; hw->phy.ops.read_reg = NULL; hw->phy.ops.write_reg = NULL; if (hw->bus.lan_id) hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; else hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; break; default: break; } return 0; } static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { return IXGBE_NOT_IMPLEMENTED; } static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { return IXGBE_NOT_IMPLEMENTED; } /** * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation * @hw: pointer to the hardware structure * @addr: I2C bus address to read from * @reg: I2C device register to read from * @val: pointer to location to receive read value * * Returns an error code on error. **/ static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) { return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); } /** * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation * @hw: pointer to the hardware structure * @addr: I2C bus address to read from * @reg: I2C device register to read from * @val: pointer to location to receive read value * * Returns an error code on error. **/ static s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) { return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); } /** * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation * @hw: pointer to the hardware structure * @addr: I2C bus address to write to * @reg: I2C device register to write to * @val: value to write * * Returns an error code on error. 
**/ static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) { return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); } /** * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation * @hw: pointer to the hardware structure * @addr: I2C bus address to write to * @reg: I2C device register to write to * @val: value to write * * Returns an error code on error. **/ static s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) { return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); } /** * ixgbe_fw_phy_activity - Perform an activity on a PHY * @hw: pointer to hardware structure * @activity: activity to perform * @data: Pointer to 4 32-bit words of data */ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, u32 (*data)[FW_PHY_ACT_DATA_COUNT]) { union { struct ixgbe_hic_phy_activity_req cmd; struct ixgbe_hic_phy_activity_resp rsp; } hic; u16 retries = FW_PHY_ACT_RETRIES; s32 rc; u32 i; do { memset(&hic, 0, sizeof(hic)); hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD; hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN; hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; hic.cmd.port_number = hw->bus.lan_id; hic.cmd.activity_id = cpu_to_le16(activity); for (i = 0; i < ARRAY_SIZE(hic.cmd.data); ++i) hic.cmd.data[i] = cpu_to_be32((*data)[i]); rc = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), IXGBE_HI_COMMAND_TIMEOUT, true); if (rc) return rc; if (hic.rsp.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) { for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) (*data)[i] = be32_to_cpu(hic.rsp.data[i]); return 0; } usleep_range(20, 30); --retries; } while (retries > 0); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } static const struct { u16 fw_speed; ixgbe_link_speed phy_speed; } ixgbe_fw_map[] = { { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL }, { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL }, { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL }, { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL }, { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL }, { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL }, }; /** * ixgbe_get_phy_id_fw - Get the phy ID via firmware command * @hw: pointer to hardware structure * * Returns error code */ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) { u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; u16 phy_speeds; u16 phy_id_lo; s32 rc; u16 i; if (hw->phy.id) return 0; rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info); if (rc) return rc; hw->phy.speeds_supported = 0; phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK; for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { if (phy_speeds & ixgbe_fw_map[i].fw_speed) hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed; } hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK; phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK; hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK; hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) return IXGBE_ERR_PHY_ADDR_INVALID; hw->phy.autoneg_advertised = hw->phy.speeds_supported; hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | IXGBE_LINK_SPEED_1GB_FULL; hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; return 0; } /** * ixgbe_identify_phy_fw - Get PHY type based on firmware command * @hw: pointer to hardware structure * * Returns error code */ static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) { if (hw->bus.lan_id) hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; 
else hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; hw->phy.type = ixgbe_phy_fw; hw->phy.ops.read_reg = NULL; hw->phy.ops.write_reg = NULL; return ixgbe_get_phy_id_fw(hw); } /** * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY * @hw: pointer to hardware structure * * Returns error code */ static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) { u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF; return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup); } /** * ixgbe_setup_fw_link - Setup firmware-controlled PHYs * @hw: pointer to hardware structure */ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) { u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; s32 rc; u16 i; if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) return 0; if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { hw_err(hw, "rx_pause not valid in strict IEEE mode\n"); return IXGBE_ERR_INVALID_LINK_SETTINGS; } switch (hw->fc.requested_mode) { case ixgbe_fc_full: setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; break; case ixgbe_fc_rx_pause: setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; break; case ixgbe_fc_tx_pause: setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; break; default: break; } for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) { if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed) setup[0] |= ixgbe_fw_map[i].fw_speed; } setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN; if (hw->phy.eee_speeds_advertised) setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE; rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup); if (rc) return rc; if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN) return IXGBE_ERR_OVERTEMP; return 0; } /** * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs * @hw: pointer to hardware structure * * Called at init time to set up flow control. */ static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) { if (hw->fc.requested_mode == ixgbe_fc_default) hw->fc.requested_mode = ixgbe_fc_full; return ixgbe_setup_fw_link(hw); } /** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params * @hw: pointer to hardware structure * * Initializes the EEPROM parameters ixgbe_eeprom_info within the * ixgbe_hw struct in order to set up EEPROM access. **/ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; u32 eec; u16 eeprom_size; if (eeprom->type == ixgbe_eeprom_uninitialized) { eeprom->semaphore_delay = 10; eeprom->type = ixgbe_flash; eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); eeprom->word_size = BIT(eeprom_size + IXGBE_EEPROM_WORD_SIZE_SHIFT); hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type, eeprom->word_size); } return 0; } /** * ixgbe_iosf_wait - Wait for IOSF command completion * @hw: pointer to hardware structure * @ctrl: pointer to location to receive final IOSF control value * * Return: failing status on timeout * * Note: ctrl can be NULL if the IOSF control register value is not needed */ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) { u32 i, command; /* Check every 10 usec to see if the address cycle completed. * The SB IOSF BUSY bit will clear when the operation is * complete. 
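* The loop below polls at most IXGBE_MDIO_COMMAND_TIMEOUT times with a 10 usec delay between reads, so the wait is bounded.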
*/ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); if (!(command & IXGBE_SB_IOSF_CTRL_BUSY)) break; udelay(10); } if (ctrl) *ctrl = command; if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { hw_dbg(hw, "IOSF wait timed out\n"); return IXGBE_ERR_PHY; } return 0; } /** ixgbe_read_iosf_sb_reg_x550 - Reads a value from the specified register of the * IOSF device * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to read * @device_type: 3 bit device type * @data: Pointer to read data from the register **/ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u32 *data) { u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; u32 command, error; s32 ret; ret = hw->mac.ops.acquire_swfw_sync(hw, gssr); if (ret) return ret; ret = ixgbe_iosf_wait(hw, NULL); if (ret) goto out; command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); /* Write IOSF control register */ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); ret = ixgbe_iosf_wait(hw, &command); if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; hw_dbg(hw, "Failed to read, error %x\n", error); ret = IXGBE_ERR_PHY; goto out; } if (!ret) *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); out: hw->mac.ops.release_swfw_sync(hw, gssr); return ret; } /** * ixgbe_get_phy_token - Get the token for shared PHY access * @hw: Pointer to hardware structure */ static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) { struct ixgbe_hic_phy_token_req token_cmd; s32 status; token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; token_cmd.hdr.cmd_or_resp.cmd_resv = 0; token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; token_cmd.port_number = hw->bus.lan_id; token_cmd.command_type = FW_PHY_TOKEN_REQ; token_cmd.pad = 0; status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd), IXGBE_HI_COMMAND_TIMEOUT, true); if (status) return status; if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) return 0; if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) return IXGBE_ERR_FW_RESP_INVALID; return IXGBE_ERR_TOKEN_RETRY; } /** * ixgbe_put_phy_token - Put the token for shared PHY access * @hw: Pointer to hardware structure */ static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) { struct ixgbe_hic_phy_token_req token_cmd; s32 status; token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; token_cmd.hdr.cmd_or_resp.cmd_resv = 0; token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; token_cmd.port_number = hw->bus.lan_id; token_cmd.command_type = FW_PHY_TOKEN_REL; token_cmd.pad = 0; status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd), IXGBE_HI_COMMAND_TIMEOUT, true); if (status) return status; if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) return 0; return IXGBE_ERR_FW_RESP_INVALID; } /** * ixgbe_write_iosf_sb_reg_x550a - Write to IOSF PHY register * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: 3 bit device type * @data: Data to write to the register **/ static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, __always_unused u32 device_type, u32 data) { struct ixgbe_hic_internal_phy_req write_cmd; memset(&write_cmd, 0, sizeof(write_cmd)); write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; write_cmd.port_number = hw->bus.lan_id; write_cmd.command_type = FW_INT_PHY_REQ_WRITE; write_cmd.address = cpu_to_be16(reg_addr); write_cmd.write_data = cpu_to_be32(data); return ixgbe_host_interface_command(hw, &write_cmd, sizeof(write_cmd), IXGBE_HI_COMMAND_TIMEOUT, false); } /** * ixgbe_read_iosf_sb_reg_x550a - Read from IOSF PHY register * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: 3 bit device type * @data: Pointer to read data from the register **/ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, __always_unused u32 device_type, u32 *data) { union { struct ixgbe_hic_internal_phy_req cmd; struct ixgbe_hic_internal_phy_resp rsp; } hic; s32 status; memset(&hic, 0, sizeof(hic)); hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; hic.cmd.port_number = hw->bus.lan_id; hic.cmd.command_type = FW_INT_PHY_REQ_READ; hic.cmd.address = cpu_to_be16(reg_addr); status = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), IXGBE_HI_COMMAND_TIMEOUT, true); /* Extract the register value from the response. */ *data = be32_to_cpu(hic.rsp.read_data); return status; } /** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @words: number of words * @data: word(s) read from the EEPROM * * Reads a 16 bit word(s) from the EEPROM using the hostif. **/ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; struct ixgbe_hic_read_shadow_ram buffer; u32 current_word = 0; u16 words_to_read; s32 status; u32 i; /* Take semaphore for the entire operation. 
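* Both IXGBE_GSSR_SW_MNG_SM and IXGBE_GSSR_EEP_SM are held across every FW_MAX_READ_BUFFER_SIZE chunk issued below, rather than being re-acquired per host interface command.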
*/ status = hw->mac.ops.acquire_swfw_sync(hw, mask); if (status) { hw_dbg(hw, "EEPROM read buffer - semaphore failed\n"); return status; } while (words) { if (words > FW_MAX_READ_BUFFER_SIZE / 2) words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; else words_to_read = words; buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; buffer.hdr.req.buf_lenh = 0; buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; /* convert offset from words to bytes */ buffer.address = (__force u32)cpu_to_be32((offset + current_word) * 2); buffer.length = (__force u16)cpu_to_be16(words_to_read * 2); buffer.pad2 = 0; buffer.pad3 = 0; status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT); if (status) { hw_dbg(hw, "Host interface command failed\n"); goto out; } for (i = 0; i < words_to_read; i++) { u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) + 2 * i; u32 value = IXGBE_READ_REG(hw, reg); data[current_word] = (u16)(value & 0xffff); current_word++; i++; if (i < words_to_read) { value >>= 16; data[current_word] = (u16)(value & 0xffff); current_word++; } } words -= words_to_read; } out: hw->mac.ops.release_swfw_sync(hw, mask); return status; } /** ixgbe_checksum_ptr_x550 - Checksum one pointer region * @hw: pointer to hardware structure * @ptr: pointer offset in eeprom * @size: size of section pointed by ptr, if 0 first word will be used as size * @csum: address of checksum to update * * Returns error status for any failure **/ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, u16 size, u16 *csum, u16 *buffer, u32 buffer_size) { u16 buf[256]; s32 status; u16 length, bufsz, i, start; u16 *local_buffer; bufsz = ARRAY_SIZE(buf); /* Read a chunk at the pointer location */ if (!buffer) { status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); if (status) { hw_dbg(hw, "Failed to read EEPROM image\n"); return status; } local_buffer = buf; } else { if (buffer_size < ptr) return IXGBE_ERR_PARAM; local_buffer = &buffer[ptr]; } if (size) { start = 0; length = size; } else { start = 1; length = local_buffer[0]; /* Skip pointer section if length is invalid. 
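* A length word of 0 or 0xFFFF, or one that would run past the EEPROM word size, means the section is not populated, so it contributes nothing to the checksum.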
*/ if (length == 0xFFFF || length == 0 || (ptr + length) >= hw->eeprom.word_size) return 0; } if (buffer && ((u32)start + (u32)length > buffer_size)) return IXGBE_ERR_PARAM; for (i = start; length; i++, length--) { if (i == bufsz && !buffer) { ptr += bufsz; i = 0; if (length < bufsz) bufsz = length; /* Read a chunk at the pointer location */ status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); if (status) { hw_dbg(hw, "Failed to read EEPROM image\n"); return status; } } *csum += local_buffer[i]; } return 0; } /** ixgbe_calc_checksum_X550 - Calculates and returns the checksum * @hw: pointer to hardware structure * @buffer: pointer to buffer containing calculated checksum * @buffer_size: size of buffer * * Returns a negative error code on error, or the 16-bit checksum **/ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size) { u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; u16 *local_buffer; s32 status; u16 checksum = 0; u16 pointer, i, size; hw->eeprom.ops.init_params(hw); if (!buffer) { /* Read pointer area */ status = ixgbe_read_ee_hostif_buffer_X550(hw, 0, IXGBE_EEPROM_LAST_WORD + 1, eeprom_ptrs); if (status) { hw_dbg(hw, "Failed to read EEPROM image\n"); return status; } local_buffer = eeprom_ptrs; } else { if (buffer_size < IXGBE_EEPROM_LAST_WORD) return IXGBE_ERR_PARAM; local_buffer = buffer; } /* For X550 hardware include 0x0-0x41 in the checksum, skip the * checksum word itself */ for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++) if (i != IXGBE_EEPROM_CHECKSUM) checksum += local_buffer[i]; /* Include all data from pointers 0x3, 0x6-0xE. This excludes the * FW, PHY module, and PCIe Expansion/Option ROM pointers. */ for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) { if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) continue; pointer = local_buffer[i]; /* Skip pointer section if the pointer is invalid. */ if (pointer == 0xFFFF || pointer == 0 || pointer >= hw->eeprom.word_size) continue; switch (i) { case IXGBE_PCIE_GENERAL_PTR: size = IXGBE_IXGBE_PCIE_GENERAL_SIZE; break; case IXGBE_PCIE_CONFIG0_PTR: case IXGBE_PCIE_CONFIG1_PTR: size = IXGBE_PCIE_CONFIG_SIZE; break; default: size = 0; break; } status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum, buffer, buffer_size); if (status) return status; } checksum = (u16)IXGBE_EEPROM_SUM - checksum; return (s32)checksum; } /** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum * @hw: pointer to hardware structure * * Returns a negative error code on error, or the 16-bit checksum **/ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) { return ixgbe_calc_checksum_X550(hw, NULL, 0); } /** ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM using the hostif. 
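* The result is fetched from the FLEX_MNG area at FW_NVM_DATA_OFFSET once the host interface command completes.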
**/ static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) { const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; struct ixgbe_hic_read_shadow_ram buffer; s32 status; buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; buffer.hdr.req.buf_lenh = 0; buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; /* convert offset from words to bytes */ buffer.address = (__force u32)cpu_to_be32(offset * 2); /* one word */ buffer.length = (__force u16)cpu_to_be16(sizeof(u16)); status = hw->mac.ops.acquire_swfw_sync(hw, mask); if (status) return status; status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT); if (!status) { *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, FW_NVM_DATA_OFFSET); } hw->mac.ops.release_swfw_sync(hw, mask); return status; } /** ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum * @hw: pointer to hardware structure * @checksum_val: calculated checksum * * Performs checksum calculation and validates the EEPROM checksum. If the * caller does not need checksum_val, the value can be NULL. **/ static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val) { s32 status; u16 checksum; u16 read_checksum = 0; /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); if (status) { hw_dbg(hw, "EEPROM read failed\n"); return status; } status = hw->eeprom.ops.calc_checksum(hw); if (status < 0) return status; checksum = (u16)(status & 0xffff); status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); if (status) return status; /* Verify read checksum from EEPROM is the same as * calculated checksum */ if (read_checksum != checksum) { status = IXGBE_ERR_EEPROM_CHECKSUM; hw_dbg(hw, "Invalid EEPROM checksum"); } /* If the user cares, return the calculated checksum */ if (checksum_val) *checksum_val = checksum; return status; } /** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to write * @data: word write to the EEPROM * * Write a 16 bit word to the EEPROM using the hostif. **/ static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data) { s32 status; struct ixgbe_hic_write_shadow_ram buffer; buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; buffer.hdr.req.buf_lenh = 0; buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; /* one word */ buffer.length = cpu_to_be16(sizeof(u16)); buffer.data = data; buffer.address = cpu_to_be32(offset * 2); status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT, false); return status; } /** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to write * @data: word write to the EEPROM * * Write a 16 bit word to the EEPROM using the hostif. 
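* This wrapper takes and releases IXGBE_GSSR_EEP_SM around ixgbe_write_ee_hostif_data_X550(), which performs the actual host interface write.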
**/ static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data) { s32 status = 0; if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { status = ixgbe_write_ee_hostif_data_X550(hw, offset, data); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); } else { hw_dbg(hw, "write ee hostif failed to get semaphore"); status = IXGBE_ERR_SWFW_SYNC; } return status; } /** ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device * @hw: pointer to hardware structure * * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. **/ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) { s32 status = 0; union ixgbe_hic_hdr2 buffer; buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; buffer.req.buf_lenh = 0; buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; buffer.req.checksum = FW_DEFAULT_CHECKSUM; status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT, false); return status; } /** * ixgbe_get_bus_info_X550em - Set PCI bus info * @hw: pointer to hardware structure * * Sets bus link width and speed to unknown because X550em is * not a PCI device. **/ static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) { hw->bus.type = ixgbe_bus_type_internal; hw->bus.width = ixgbe_bus_width_unknown; hw->bus.speed = ixgbe_bus_speed_unknown; hw->mac.ops.set_lan_id(hw); return 0; } /** * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode * @hw: pointer to hardware structure * * Returns true if in FW NVM recovery mode. */ static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw) { u32 fwsm; fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE); } /** ixgbe_disable_rx_x550 - Disable RX unit * @hw: pointer to hardware structure * * Disables the Rx unit for x550 **/ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) { u32 rxctrl, pfdtxgswc; s32 status; struct ixgbe_hic_disable_rxen fw_cmd; rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); if (rxctrl & IXGBE_RXCTRL_RXEN) { pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); hw->mac.set_lben = true; } else { hw->mac.set_lben = false; } fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; fw_cmd.port_number = hw->bus.lan_id; status = ixgbe_host_interface_command(hw, &fw_cmd, sizeof(struct ixgbe_hic_disable_rxen), IXGBE_HI_COMMAND_TIMEOUT, true); /* If we fail - disable RX using register write */ if (status) { rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); if (rxctrl & IXGBE_RXCTRL_RXEN) { rxctrl &= ~IXGBE_RXCTRL_RXEN; IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); } } } } /** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash * @hw: pointer to hardware structure * * After writing EEPROM to shadow RAM using EEWR register, software calculates * checksum and updates the EEPROM and instructs the hardware to update * the flash. **/ static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) { s32 status; u16 checksum = 0; /* Read the first word from the EEPROM.
If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum); if (status) { hw_dbg(hw, "EEPROM read failed\n"); return status; } status = ixgbe_calc_eeprom_checksum_X550(hw); if (status < 0) return status; checksum = (u16)(status & 0xffff); status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, checksum); if (status) return status; status = ixgbe_update_flash_X550(hw); return status; } /** ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to write * @words: number of words * @data: word(s) write to the EEPROM * * * Write a 16 bit word(s) to the EEPROM using the hostif. **/ static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status = 0; u32 i = 0; /* Take semaphore for the entire operation. */ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); if (status) { hw_dbg(hw, "EEPROM write buffer - semaphore failed\n"); return status; } for (i = 0; i < words; i++) { status = ixgbe_write_ee_hostif_data_X550(hw, offset + i, data[i]); if (status) { hw_dbg(hw, "Eeprom buffered write failed\n"); break; } } hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } /** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the * IOSF device * * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: 3 bit device type * @data: Data to write to the register **/ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u32 data) { u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; u32 command, error; s32 ret; ret = hw->mac.ops.acquire_swfw_sync(hw, gssr); if (ret) return ret; ret = ixgbe_iosf_wait(hw, NULL); if (ret) goto out; command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); /* Write IOSF control register */ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); /* Write IOSF data register */ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data); ret = ixgbe_iosf_wait(hw, &command); if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; hw_dbg(hw, "Failed to write, error %x\n", error); return IXGBE_ERR_PHY; } out: hw->mac.ops.release_swfw_sync(hw, gssr); return ret; } /** * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration * @hw: pointer to hardware structure * * iXfI configuration needed for ixgbe_mac_X550EM_x devices. **/ static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) { s32 status; u32 reg_val; /* Disable training protocol FSM. */ status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status) return status; reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL; status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (status) return status; /* Disable Flex from training TXFFE. 
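* The same C0, CP1/CN1 and adapt enables are cleared in both DSP_TXFFE_STATE_4 and DSP_TXFFE_STATE_5 below; the transmit coefficients are instead forced through the override enables set at the end of this function.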
*/ status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status) return status; reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (status) return status; status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status) return status; reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (status) return status; /* Enable override for coefficients. */ status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status) return status; reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN; reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN; reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN; reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN; status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); return status; } /** * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the * internal PHY * @hw: pointer to hardware structure **/ static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) { s32 status; u32 link_ctrl; /* Restart auto-negotiation. */ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl); if (status) { hw_dbg(hw, "Auto-negotiation did not complete\n"); return status; } link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl); if (hw->mac.type == ixgbe_mac_x550em_a) { u32 flx_mask_st20; /* Indicate to FW that AN restart has been asserted */ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20); if (status) { hw_dbg(hw, "Auto-negotiation did not complete\n"); return status; } flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART; status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20); } return status; } /** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. * @hw: pointer to hardware structure * @speed: the link speed to force * * Configures the integrated KR PHY to use iXFI mode. Used to connect an * internal and external PHY at a specific speed, without autonegotiation. **/ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) { struct ixgbe_mac_info *mac = &hw->mac; s32 status; u32 reg_val; /* iXFI is only supported with X552 */ if (mac->type != ixgbe_mac_X550EM_x) return IXGBE_ERR_LINK_SETUP; /* Disable AN and force speed to 10G Serial. */ status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status) return status; reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; /* Select forced link speed for internal PHY. 
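* Only 10G and 1G are supported here; any other requested speed falls through to the default case and is rejected with IXGBE_ERR_LINK_SETUP.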
*/ switch (*speed) { case IXGBE_LINK_SPEED_10GB_FULL: reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; break; case IXGBE_LINK_SPEED_1GB_FULL: reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; break; default: /* Other link speeds are not supported by internal KR PHY. */ return IXGBE_ERR_LINK_SETUP; } status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (status) return status; /* Additional configuration needed for x550em_x */ if (hw->mac.type == ixgbe_mac_X550EM_x) { status = ixgbe_setup_ixfi_x550em_x(hw); if (status) return status; } /* Toggle port SW reset by AN reset. */ status = ixgbe_restart_an_internal_phy_x550em(hw); return status; } /** * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported * @hw: pointer to hardware structure * @linear: true if SFP module is linear */ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) { switch (hw->phy.sfp_type) { case ixgbe_sfp_type_not_present: return IXGBE_ERR_SFP_NOT_PRESENT; case ixgbe_sfp_type_da_cu_core0: case ixgbe_sfp_type_da_cu_core1: *linear = true; break; case ixgbe_sfp_type_srlr_core0: case ixgbe_sfp_type_srlr_core1: case ixgbe_sfp_type_da_act_lmt_core0: case ixgbe_sfp_type_da_act_lmt_core1: case ixgbe_sfp_type_1g_sx_core0: case ixgbe_sfp_type_1g_sx_core1: case ixgbe_sfp_type_1g_lx_core0: case ixgbe_sfp_type_1g_lx_core1: *linear = false; break; case ixgbe_sfp_type_unknown: case ixgbe_sfp_type_1g_cu_core0: case ixgbe_sfp_type_1g_cu_core1: default: return IXGBE_ERR_SFP_NOT_SUPPORTED; } return 0; } /** * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP. * @hw: pointer to hardware structure * @speed: the link speed to force * @autoneg_wait_to_complete: unused * * Configures the extern PHY and the integrated KR PHY for SFP support. */ static s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, ixgbe_link_speed speed, __always_unused bool autoneg_wait_to_complete) { s32 status; u16 reg_slice, reg_val; bool setup_linear = false; /* Check if SFP module is supported and linear */ status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); /* If no SFP module present, then return success. Return success since * there is no reason to configure CS4227 and SFP not present error is * not accepted in the setup MAC link flow. */ if (status == IXGBE_ERR_SFP_NOT_PRESENT) return 0; if (status) return status; /* Configure internal PHY for KR/KX. */ ixgbe_setup_kr_speed_x550em(hw, speed); /* Configure CS4227 LINE side to proper mode. */ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); if (setup_linear) reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; else reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; status = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, reg_val); return status; } /** * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode * @hw: pointer to hardware structure * @speed: the link speed to force * * Configures the integrated PHY for native SFI mode. Used to connect the * internal PHY directly to an SFP cage, without autonegotiation. **/ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) { struct ixgbe_mac_info *mac = &hw->mac; s32 status; u32 reg_val; /* Disable all AN and force speed to 10G Serial. 
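* The speed field is cleared here and then set to the caller's requested 10G or 1G value in the switch that follows.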
*/ status = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status) return status; reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; /* Select forced link speed for internal PHY. */ switch (*speed) { case IXGBE_LINK_SPEED_10GB_FULL: reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G; break; case IXGBE_LINK_SPEED_1GB_FULL: reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; break; default: /* Other link speeds are not supported by internal PHY. */ return IXGBE_ERR_LINK_SETUP; } (void)mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); /* change mode enforcement rules to hybrid */ (void)mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); reg_val |= 0x0400; (void)mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); /* manually control the config */ (void)mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); reg_val |= 0x20002240; (void)mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); /* move the AN base page values */ (void)mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); reg_val |= 0x1; (void)mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); /* set the AN37 over CB mode */ (void)mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); reg_val |= 0x20000000; (void)mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); /* restart AN manually */ (void)mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; (void)mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); /* Toggle port SW reset by AN reset. */ status = ixgbe_restart_an_internal_phy_x550em(hw); return status; } /** * ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP * @hw: pointer to hardware structure * @speed: link speed * @autoneg_wait_to_complete: unused * * Configure the integrated PHY for native SFP support. */ static s32 ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed, __always_unused bool autoneg_wait_to_complete) { bool setup_linear = false; u32 reg_phy_int; s32 ret_val; /* Check if SFP module is supported and linear */ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); /* If no SFP module present, then return success. Return success since * SFP not present error is not excepted in the setup MAC link flow. 
*/ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) return 0; if (ret_val) return ret_val; /* Configure internal PHY for native SFI based on module type */ ret_val = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int); if (ret_val) return ret_val; reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA; if (!setup_linear) reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR; ret_val = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int); if (ret_val) return ret_val; /* Setup SFI internal link. */ return ixgbe_setup_sfi_x550a(hw, &speed); } /** * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP * @hw: pointer to hardware structure * @speed: link speed * @autoneg_wait_to_complete: unused * * Configure the integrated PHY for SFP support. */ static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, __always_unused bool autoneg_wait_to_complete) { u32 reg_slice, slice_offset; bool setup_linear = false; u16 reg_phy_ext; s32 ret_val; /* Check if SFP module is supported and linear */ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); /* If no SFP module present, then return success. Return success since * SFP not present error is not excepted in the setup MAC link flow. */ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) return 0; if (ret_val) return ret_val; /* Configure internal PHY for KR/KX. */ ixgbe_setup_kr_speed_x550em(hw, speed); if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE) return IXGBE_ERR_PHY_ADDR_INVALID; /* Get external PHY SKU id */ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU, IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext); if (ret_val) return ret_val; /* When configuring quad port CS4223, the MAC instance is part * of the slice offset. */ if (reg_phy_ext == IXGBE_CS4223_SKU_ID) slice_offset = (hw->bus.lan_id + (hw->bus.instance_id << 1)) << 12; else slice_offset = hw->bus.lan_id << 12; /* Configure CS4227/CS4223 LINE side to proper mode. */ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; ret_val = hw->phy.ops.read_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext); if (ret_val) return ret_val; reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) | (IXGBE_CS4227_EDC_MODE_SR << 1)); if (setup_linear) reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; else reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; ret_val = hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext); if (ret_val) return ret_val; /* Flush previous write with a read */ return hw->phy.ops.read_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext); } /** * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed * @hw: pointer to hardware structure * @speed: new link speed * @autoneg_wait: true when waiting for completion is needed * * Setup internal/external PHY link speed based on link speed, then set * external PHY auto advertised link speed. * * Returns error status for any failure **/ static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait) { s32 status; ixgbe_link_speed force_speed; /* Setup internal/external PHY link speed to iXFI (10G), unless * only 1G is auto advertised then setup KX link. */ if (speed & IXGBE_LINK_SPEED_10GB_FULL) force_speed = IXGBE_LINK_SPEED_10GB_FULL; else force_speed = IXGBE_LINK_SPEED_1GB_FULL; /* If X552 and internal link mode is XFI, then setup XFI internal link. 
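* The check below treats X552 (ixgbe_mac_X550EM_x) with IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE clear in nw_mng_if_sel as iXFI mode.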
*/ if (hw->mac.type == ixgbe_mac_X550EM_x && !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { status = ixgbe_setup_ixfi_x550em(hw, &force_speed); if (status) return status; } return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); } /** ixgbe_check_link_t_X550em - Determine link and speed status * @hw: pointer to hardware structure * @speed: pointer to link speed * @link_up: true when link is up * @link_up_wait_to_complete: bool used to wait for link up or not * * Check that both the MAC and X557 external PHY have link. **/ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete) { u32 status; u16 i, autoneg_status; if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) return IXGBE_ERR_CONFIG; status = ixgbe_check_mac_link_generic(hw, speed, link_up, link_up_wait_to_complete); /* If check link fails or MAC link is not up, then return */ if (status || !(*link_up)) return status; /* MAC link is up, so check external PHY link. * Link status is latching low, and can only be used to detect link * drop, and not the current status of the link without performing * back-to-back reads. */ for (i = 0; i < 2; i++) { status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &autoneg_status); if (status) return status; } /* If external PHY link is not up, then indicate link not up */ if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) *link_up = false; return 0; } /** * ixgbe_setup_sgmii - Set up link for sgmii * @hw: pointer to hardware structure * @speed: unused * @autoneg_wait_to_complete: unused */ static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, __always_unused bool autoneg_wait_to_complete) { struct ixgbe_mac_info *mac = &hw->mac; u32 lval, sval, flx_val; s32 rc; rc = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); if (rc) return rc; lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; rc = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, lval); if (rc) return rc; rc = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); if (rc) return rc; sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; rc = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, sval); if (rc) return rc; rc = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); if (rc) return rc; rc = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); if (rc) return rc; flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; rc = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); if (rc) return rc; rc = ixgbe_restart_an_internal_phy_x550em(hw); return rc; } /** * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs * @hw: pointer to hardware structure * @speed: the 
link speed to force * @autoneg_wait: true when waiting for completion is needed */ static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait) { struct ixgbe_mac_info *mac = &hw->mac; u32 lval, sval, flx_val; s32 rc; rc = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); if (rc) return rc; lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; rc = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, lval); if (rc) return rc; rc = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); if (rc) return rc; sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; rc = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, sval); if (rc) return rc; rc = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, lval); if (rc) return rc; rc = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); if (rc) return rc; flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; rc = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); if (rc) return rc; ixgbe_restart_an_internal_phy_x550em(hw); return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); } /** * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 * @hw: pointer to hardware structure * * Enable flow control according to IEEE clause 37. */ static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; ixgbe_link_speed speed; bool link_up; /* AN should have completed when the cable was plugged in. * Look for reasons to bail out. Bail out if: * - FC autoneg is disabled, or if * - link is not up. 
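* - the FW_PHY_ACT_GET_LINK_INFO activity does not report auto-negotiation as complete (checked just below).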
*/ if (hw->fc.disable_fc_autoneg) goto out; hw->mac.ops.check_link(hw, &speed, &link_up, false); if (!link_up) goto out; /* Check if auto-negotiation has completed */ status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { status = IXGBE_ERR_FC_NOT_NEGOTIATED; goto out; } /* Negotiate the flow control */ status = ixgbe_negotiate_fc(hw, info[0], info[0], FW_PHY_ACT_GET_LINK_INFO_FC_RX, FW_PHY_ACT_GET_LINK_INFO_FC_TX, FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); out: if (!status) { hw->fc.fc_was_autonegged = true; } else { hw->fc.fc_was_autonegged = false; hw->fc.current_mode = hw->fc.requested_mode; } } /** ixgbe_init_mac_link_ops_X550em_a - Init mac link function pointers * @hw: pointer to hardware structure **/ static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; switch (mac->ops.get_media_type(hw)) { case ixgbe_media_type_fiber: mac->ops.setup_fc = NULL; mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; break; case ixgbe_media_type_copper: if (hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T && hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T_L) { mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; break; } mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; mac->ops.setup_fc = ixgbe_fc_autoneg_fw; mac->ops.setup_link = ixgbe_setup_sgmii_fw; mac->ops.check_link = ixgbe_check_mac_link_generic; break; case ixgbe_media_type_backplane: mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; break; default: break; } } /** ixgbe_init_mac_link_ops_X550em - init mac link function pointers * @hw: pointer to hardware structure **/ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; mac->ops.setup_fc = ixgbe_setup_fc_x550em; switch (mac->ops.get_media_type(hw)) { case ixgbe_media_type_fiber: /* CS4227 does not support autoneg, so disable the laser control * functions for SFP+ fiber */ mac->ops.disable_tx_laser = NULL; mac->ops.enable_tx_laser = NULL; mac->ops.flap_tx_laser = NULL; mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_SFP_N: mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n; break; case IXGBE_DEV_ID_X550EM_A_SFP: mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550a; break; default: mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em; break; } mac->ops.set_rate_select_speed = ixgbe_set_soft_rate_select_speed; break; case ixgbe_media_type_copper: if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) break; mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; mac->ops.setup_fc = ixgbe_setup_fc_generic; mac->ops.check_link = ixgbe_check_link_t_X550em; break; case ixgbe_media_type_backplane: if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) mac->ops.setup_link = ixgbe_setup_sgmii; break; default: break; } /* Additional modification for X550em_a devices */ if (hw->mac.type == ixgbe_mac_x550em_a) ixgbe_init_mac_link_ops_X550em_a(hw); } /** ixgbe_setup_sfp_modules_X550em - Setup SFP module * @hw: pointer to hardware structure */ static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) { s32 status; bool linear; /* Check if SFP module is supported */ status = ixgbe_supported_sfp_modules_X550em(hw, &linear); if (status) return status; ixgbe_init_mac_link_ops_X550em(hw); hw->phy.ops.reset = NULL; return 0; } 
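/* Illustration (editor's sketch, not part of the original driver): once
 * ixgbe_init_mac_link_ops_X550em() has filled in the ops above, upper layers
 * drive the link only through those function pointers, e.g.
 *
 *	hw->mac.ops.setup_link(hw, IXGBE_LINK_SPEED_10GB_FULL, false);
 *
 * which dispatches to ixgbe_setup_mac_link_t_X550em(), ixgbe_setup_sgmii(),
 * ixgbe_setup_sgmii_fw() or the multispeed fiber path, depending on the media
 * type and device id matched above.
 */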
/** ixgbe_get_link_capabilities_X550em - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed * @autoneg: true when autoneg or autotry is enabled **/ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { if (hw->phy.type == ixgbe_phy_fw) { *autoneg = true; *speed = hw->phy.speeds_supported; return 0; } /* SFP */ if (hw->phy.media_type == ixgbe_media_type_fiber) { /* CS4227 SFP must not enable auto-negotiation */ *autoneg = false; if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) { *speed = IXGBE_LINK_SPEED_1GB_FULL; return 0; } /* Link capabilities are based on SFP */ if (hw->phy.multispeed_fiber) *speed = IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; else *speed = IXGBE_LINK_SPEED_10GB_FULL; } else { switch (hw->phy.type) { case ixgbe_phy_x550em_kx4: *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_2_5GB_FULL | IXGBE_LINK_SPEED_10GB_FULL; break; case ixgbe_phy_x550em_xfi: *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL; break; case ixgbe_phy_ext_1g_t: case ixgbe_phy_sgmii: *speed = IXGBE_LINK_SPEED_1GB_FULL; break; case ixgbe_phy_x550em_kr: if (hw->mac.type == ixgbe_mac_x550em_a) { /* check different backplane modes */ if (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { *speed = IXGBE_LINK_SPEED_2_5GB_FULL; break; } else if (hw->device_id == IXGBE_DEV_ID_X550EM_A_KR_L) { *speed = IXGBE_LINK_SPEED_1GB_FULL; break; } } fallthrough; default: *speed = IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; break; } *autoneg = true; } return 0; } /** * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause * @hw: pointer to hardware structure * @lsc: pointer to boolean flag which indicates whether external Base T * PHY interrupt is lsc * * Determine if external Base T PHY interrupt cause is high temperature * failure alarm or link status change. * * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature * failure alarm, else return PHY access status.
**/ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) { u32 status; u16 reg; *lsc = false; /* Vendor alarm triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, MDIO_MMD_VEND1, &reg); if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN)) return status; /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG, MDIO_MMD_VEND1, &reg); if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | IXGBE_MDIO_GLOBAL_ALARM_1_INT))) return status; /* Global alarm triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, MDIO_MMD_VEND1, &reg); if (status) return status; /* If high temperature failure, then return over temp error and exit */ if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) { /* power down the PHY in case the PHY FW didn't already */ ixgbe_set_copper_phy_power(hw, false); return IXGBE_ERR_OVERTEMP; } if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { /* device fault alarm triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, MDIO_MMD_VEND1, &reg); if (status) return status; /* if device fault was due to high temp alarm handle and exit */ if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) { /* power down the PHY in case the PHY FW didn't */ ixgbe_set_copper_phy_power(hw, false); return IXGBE_ERR_OVERTEMP; } } /* Vendor alarm 2 triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, MDIO_MMD_AN, &reg); if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT)) return status; /* link connect/disconnect event occurred */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2, MDIO_MMD_AN, &reg); if (status) return status; /* Indicate LSC */ if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC) *lsc = true; return 0; } /** * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts * @hw: pointer to hardware structure * * Enable link status change and temperature failure alarm for the external * Base T PHY * * Returns PHY access status **/ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) { u32 status; u16 reg; bool lsc; /* Clear interrupt flags */ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); /* Enable link status change alarm */ /* Enable the LASI interrupts on X552 devices to receive notifications * of the link configurations of the external PHY and correspondingly * support the configuration of the internal iXFI link, since iXFI does * not support auto-negotiation. This is not required for X553 devices * having KR support, which performs auto-negotiations and which is used * as the internal link to the external PHY. Hence adding a check here * to avoid enabling LASI interrupts for X553 devices. 
*/ if (hw->mac.type != ixgbe_mac_x550em_a) { status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, MDIO_MMD_AN, &reg); if (status) return status; reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, MDIO_MMD_AN, reg); if (status) return status; } /* Enable high temperature failure and global fault alarms */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, MDIO_MMD_VEND1, &reg); if (status) return status; reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN | IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN); status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, MDIO_MMD_VEND1, reg); if (status) return status; /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, MDIO_MMD_VEND1, &reg); if (status) return status; reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | IXGBE_MDIO_GLOBAL_ALARM_1_INT); status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, MDIO_MMD_VEND1, reg); if (status) return status; /* Enable chip-wide vendor alarm */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, MDIO_MMD_VEND1, &reg); if (status) return status; reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN; status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, MDIO_MMD_VEND1, reg); return status; } /** * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt * @hw: pointer to hardware structure * * Handle external Base T PHY interrupt. If high temperature * failure alarm then return error, else if link status change * then setup internal/external PHY link * * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature * failure alarm, else return PHY access status. **/ static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) { struct ixgbe_phy_info *phy = &hw->phy; bool lsc; u32 status; status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); if (status) return status; if (lsc && phy->ops.setup_internal_link) return phy->ops.setup_internal_link(hw); return 0; } /** * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed. * @hw: pointer to hardware structure * @speed: link speed * * Configures the integrated KR PHY. **/ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, ixgbe_link_speed speed) { s32 status; u32 reg_val; status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status) return status; reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); /* Advertise 10G support. */ if (speed & IXGBE_LINK_SPEED_10GB_FULL) reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR; /* Advertise 1G support. 
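* (TETH_AN_CAP_KX advertises the 1G backplane ability, mirroring the KR bit used for 10G just above.)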
*/ if (speed & IXGBE_LINK_SPEED_1GB_FULL) reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (hw->mac.type == ixgbe_mac_x550em_a) { /* Set lane mode to KR auto negotiation */ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status) return status; reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); } return ixgbe_restart_an_internal_phy_x550em(hw); } /** * ixgbe_setup_kr_x550em - Configure the KR PHY * @hw: pointer to hardware structure **/ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) { /* leave link alone for 2.5G */ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) return 0; if (ixgbe_check_reset_blocked(hw)) return 0; return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); } /** ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status * @hw: address of hardware structure * @link_up: address of boolean to indicate link status * * Returns error code if unable to get link status. **/ static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) { u32 ret; u16 autoneg_status; *link_up = false; /* read this twice back to back to indicate current status */ ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &autoneg_status); if (ret) return ret; ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &autoneg_status); if (ret) return ret; *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS); return 0; } /** ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link * @hw: point to hardware structure * * Configures the link between the integrated KR PHY and the external X557 PHY * The driver will call this function when it gets a link status change * interrupt from the X557 PHY. This function configures the link speed * between the PHYs to match the link speed of the BASE-T link. * * A return of a non-zero value indicates an error, and the base driver should * not report link up. 
**/ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) { ixgbe_link_speed force_speed; bool link_up; u32 status; u16 speed; if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) return IXGBE_ERR_CONFIG; if (!(hw->mac.type == ixgbe_mac_X550EM_x && !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) { speed = IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; return ixgbe_setup_kr_speed_x550em(hw, speed); } /* If link is not up, then there is no setup necessary so return */ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); if (status) return status; if (!link_up) return 0; status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, MDIO_MMD_AN, &speed); if (status) return status; /* If link is not still up, then no setup is necessary so return */ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); if (status) return status; if (!link_up) return 0; /* clear everything but the speed and duplex bits */ speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; switch (speed) { case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL: force_speed = IXGBE_LINK_SPEED_10GB_FULL; break; case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL: force_speed = IXGBE_LINK_SPEED_1GB_FULL; break; default: /* Internal PHY does not support anything else */ return IXGBE_ERR_INVALID_LINK_SETTINGS; } return ixgbe_setup_ixfi_x550em(hw, &force_speed); } /** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI * @hw: pointer to hardware structure **/ static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) { s32 status; status = ixgbe_reset_phy_generic(hw); if (status) return status; /* Configure Link Status Alarm and Temperature Threshold interrupts */ return ixgbe_enable_lasi_ext_t_x550em(hw); } /** * ixgbe_led_on_t_x550em - Turns on the software controllable LEDs. * @hw: pointer to hardware structure * @led_idx: led number to turn on **/ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx) { u16 phy_data; if (led_idx >= IXGBE_X557_MAX_LED_INDEX) return IXGBE_ERR_PARAM; /* To turn on the LED, set mode to ON. */ hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, MDIO_MMD_VEND1, &phy_data); phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK; hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, MDIO_MMD_VEND1, phy_data); return 0; } /** * ixgbe_led_off_t_x550em - Turns off the software controllable LEDs. * @hw: pointer to hardware structure * @led_idx: led number to turn off **/ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) { u16 phy_data; if (led_idx >= IXGBE_X557_MAX_LED_INDEX) return IXGBE_ERR_PARAM; /* To turn on the LED, set mode to ON. */ hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, MDIO_MMD_VEND1, &phy_data); phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK; hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, MDIO_MMD_VEND1, phy_data); return 0; } /** * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware * @hw: pointer to the HW structure * @maj: driver version major number * @min: driver version minor number * @build: driver version build number * @sub: driver version sub build number * @len: length of driver_ver string * @driver_ver: driver string * * Sends driver version number to firmware through the manageability * block. On success return 0 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
**/ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 sub, u16 len, const char *driver_ver) { struct ixgbe_hic_drv_info2 fw_cmd; s32 ret_val; int i; if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string))) return IXGBE_ERR_INVALID_ARGUMENT; fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; fw_cmd.port_num = (u8)hw->bus.func; fw_cmd.ver_maj = maj; fw_cmd.ver_min = min; fw_cmd.ver_build = build; fw_cmd.ver_sub = sub; fw_cmd.hdr.checksum = 0; memcpy(fw_cmd.driver_string, driver_ver, len); fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, sizeof(fw_cmd), IXGBE_HI_COMMAND_TIMEOUT, true); if (ret_val) continue; if (fw_cmd.hdr.cmd_or_resp.ret_status != FW_CEM_RESP_STATUS_SUCCESS) return IXGBE_ERR_HOST_INTERFACE_COMMAND; return 0; } return ret_val; } /** ixgbe_get_lcd_x550em - Determine lowest common denominator * @hw: pointer to hardware structure * @lcd_speed: pointer to lowest common link speed * * Determine lowest common link speed with link partner. **/ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed) { u16 an_lp_status; s32 status; u16 word = hw->eeprom.ctrl_word_3; *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, MDIO_MMD_AN, &an_lp_status); if (status) return status; /* If link partner advertised 1G, return 1G */ if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) { *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL; return status; } /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */ if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) || (word & NVM_INIT_CTRL_3_D10GMP_PORT0)) return status; /* Link partner not capable of lower speeds, return 10G */ *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL; return status; } /** * ixgbe_setup_fc_x550em - Set up flow control * @hw: pointer to hardware structure */ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) { bool pause, asm_dir; u32 reg_val; s32 rc = 0; /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); return IXGBE_ERR_INVALID_LINK_SETTINGS; } /* 10gig parts do not have a word in the EEPROM to determine the * default flow control setting, so we explicitly set it to full. */ if (hw->fc.requested_mode == ixgbe_fc_default) hw->fc.requested_mode = ixgbe_fc_full; /* Determine PAUSE and ASM_DIR bits. */ switch (hw->fc.requested_mode) { case ixgbe_fc_none: pause = false; asm_dir = false; break; case ixgbe_fc_tx_pause: pause = false; asm_dir = true; break; case ixgbe_fc_rx_pause: /* Rx Flow control is enabled and Tx Flow control is * disabled by software override. Since there really * isn't a way to advertise that we are capable of RX * Pause ONLY, we will advertise that we support both * symmetric and asymmetric Rx PAUSE, as such we fall * through to the fc_full statement. Later, we will * disable the adapter's ability to send PAUSE frames. 
*/ fallthrough; case ixgbe_fc_full: pause = true; asm_dir = true; break; default: hw_err(hw, "Flow control param set incorrectly\n"); return IXGBE_ERR_CONFIG; } switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_A_KR: case IXGBE_DEV_ID_X550EM_A_KR_L: rc = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (rc) return rc; reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); if (pause) reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; if (asm_dir) reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; rc = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); /* This device does not fully support AN. */ hw->fc.disable_fc_autoneg = true; break; case IXGBE_DEV_ID_X550EM_X_XFI: hw->fc.disable_fc_autoneg = true; break; default: break; } return rc; } /** * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37 * @hw: pointer to hardware structure **/ static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) { u32 link_s1, lp_an_page_low, an_cntl_1; s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; ixgbe_link_speed speed; bool link_up; /* AN should have completed when the cable was plugged in. * Look for reasons to bail out. Bail out if: * - FC autoneg is disabled, or if * - link is not up. */ if (hw->fc.disable_fc_autoneg) { hw_err(hw, "Flow control autoneg is disabled"); goto out; } hw->mac.ops.check_link(hw, &speed, &link_up, false); if (!link_up) { hw_err(hw, "The link is down"); goto out; } /* Check at auto-negotiation has completed */ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_S1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1); if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) { hw_dbg(hw, "Auto-Negotiation did not complete\n"); status = IXGBE_ERR_FC_NOT_NEGOTIATED; goto out; } /* Read the 10g AN autoc and LP ability registers and resolve * local flow control settings accordingly */ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1); if (status) { hw_dbg(hw, "Auto-Negotiation did not complete\n"); goto out; } status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low); if (status) { hw_dbg(hw, "Auto-Negotiation did not complete\n"); goto out; } status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low, IXGBE_KRM_AN_CNTL_1_SYM_PAUSE, IXGBE_KRM_AN_CNTL_1_ASM_PAUSE, IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE, IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE); out: if (!status) { hw->fc.fc_was_autonegged = true; } else { hw->fc.fc_was_autonegged = false; hw->fc.current_mode = hw->fc.requested_mode; } } /** * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings * @hw: pointer to hardware structure **/ static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw) { hw->fc.fc_was_autonegged = false; hw->fc.current_mode = hw->fc.requested_mode; } /** ixgbe_enter_lplu_x550em - Transition to low power states * @hw: pointer to hardware structure * * Configures Low Power Link Up on transition to low power states * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting * the X557 PHY immediately prior to entering LPLU. 
**/ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) { u16 an_10g_cntl_reg, autoneg_reg, speed; s32 status; ixgbe_link_speed lcd_speed; u32 save_autoneg; bool link_up; /* If blocked by MNG FW, then don't restart AN */ if (ixgbe_check_reset_blocked(hw)) return 0; status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); if (status) return status; status = hw->eeprom.ops.read(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3); if (status) return status; /* If link is down, LPLU disabled in NVM, WoL disabled, or * manageability disabled, then force link down by entering * low power mode. */ if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) || !(hw->wol_enabled || ixgbe_mng_present(hw))) return ixgbe_set_copper_phy_power(hw, false); /* Determine LCD */ status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed); if (status) return status; /* If no valid LCD link speed, then force link down and exit. */ if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN) return ixgbe_set_copper_phy_power(hw, false); status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, MDIO_MMD_AN, &speed); if (status) return status; /* If no link now, speed is invalid so take link down */ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); if (status) return ixgbe_set_copper_phy_power(hw, false); /* clear everything but the speed bits */ speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK; /* If current speed is already LCD, then exit. */ if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) && (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) || ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) && (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL))) return status; /* Clear AN completed indication */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, MDIO_MMD_AN, &autoneg_reg); if (status) return status; status = hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &an_10g_cntl_reg); if (status) return status; status = hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, MDIO_MMD_AN, &autoneg_reg); if (status) return status; save_autoneg = hw->phy.autoneg_advertised; /* Setup link at least common link speed */ status = hw->mac.ops.setup_link(hw, lcd_speed, false); /* restore autoneg from before setting lplu speed */ hw->phy.autoneg_advertised = save_autoneg; return status; } /** * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs * @hw: pointer to hardware structure */ static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) { u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; s32 rc; if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) return 0; rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store); if (rc) return rc; memset(store, 0, sizeof(store)); rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store); if (rc) return rc; return ixgbe_setup_fw_link(hw); } /** * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp * @hw: pointer to hardware structure */ static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) { u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; s32 rc; rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store); if (rc) return rc; if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) { ixgbe_shutdown_fw_phy(hw); return IXGBE_ERR_OVERTEMP; } return 0; } /** * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register * @hw: pointer to hardware structure * * Read NW_MNG_IF_SEL register and save field values. */ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) { /* Save NW management interface connected on board. 
This is used * to determine internal PHY mode. */ hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set * PHY address. This register field was has only been used for X552. */ if (hw->mac.type == ixgbe_mac_x550em_a && hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) { hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; } } /** ixgbe_init_phy_ops_X550em - PHY/SFP specific init * @hw: pointer to hardware structure * * Initialize any function pointers that were not able to be * set during init_shared_code because the PHY/SFP type was * not known. Perform the SFP init if necessary. **/ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) { struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val; hw->mac.ops.set_lan_id(hw); ixgbe_read_mng_if_sel_x550em(hw); if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ixgbe_setup_mux_ctl(hw); } /* Identify the PHY or SFP module */ ret_val = phy->ops.identify(hw); if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED || ret_val == IXGBE_ERR_PHY_ADDR_INVALID) return ret_val; /* Setup function pointers based on detected hardware */ ixgbe_init_mac_link_ops_X550em(hw); if (phy->sfp_type != ixgbe_sfp_type_unknown) phy->ops.reset = NULL; /* Set functions pointers based on phy type */ switch (hw->phy.type) { case ixgbe_phy_x550em_kx4: phy->ops.setup_link = NULL; phy->ops.read_reg = ixgbe_read_phy_reg_x550em; phy->ops.write_reg = ixgbe_write_phy_reg_x550em; break; case ixgbe_phy_x550em_kr: phy->ops.setup_link = ixgbe_setup_kr_x550em; phy->ops.read_reg = ixgbe_read_phy_reg_x550em; phy->ops.write_reg = ixgbe_write_phy_reg_x550em; break; case ixgbe_phy_x550em_xfi: /* link is managed by HW */ phy->ops.setup_link = NULL; phy->ops.read_reg = ixgbe_read_phy_reg_x550em; phy->ops.write_reg = ixgbe_write_phy_reg_x550em; break; case ixgbe_phy_x550em_ext_t: /* Save NW management interface connected on board. This is used * to determine internal PHY mode */ phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); /* If internal link mode is XFI, then setup iXFI internal link, * else setup KR now. */ phy->ops.setup_internal_link = ixgbe_setup_internal_phy_t_x550em; /* setup SW LPLU only for first revision */ if (hw->mac.type == ixgbe_mac_X550EM_x && !(IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)) & IXGBE_FUSES0_REV_MASK)) phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em; phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; phy->ops.reset = ixgbe_reset_phy_t_X550em; break; case ixgbe_phy_sgmii: phy->ops.setup_link = NULL; break; case ixgbe_phy_fw: phy->ops.setup_link = ixgbe_setup_fw_link; phy->ops.reset = ixgbe_reset_phy_fw; break; case ixgbe_phy_ext_1g_t: phy->ops.setup_link = NULL; phy->ops.read_reg = NULL; phy->ops.write_reg = NULL; phy->ops.reset = NULL; break; default: break; } return ret_val; } /** ixgbe_get_media_type_X550em - Get media type * @hw: pointer to hardware structure * * Returns the media type (fiber, copper, backplane) * */ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) { enum ixgbe_media_type media_type; /* Detect if there is a copper PHY attached. 
*/ switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_SGMII: case IXGBE_DEV_ID_X550EM_A_SGMII_L: hw->phy.type = ixgbe_phy_sgmii; fallthrough; case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_X_KX4: case IXGBE_DEV_ID_X550EM_X_XFI: case IXGBE_DEV_ID_X550EM_A_KR: case IXGBE_DEV_ID_X550EM_A_KR_L: media_type = ixgbe_media_type_backplane; break; case IXGBE_DEV_ID_X550EM_X_SFP: case IXGBE_DEV_ID_X550EM_A_SFP: case IXGBE_DEV_ID_X550EM_A_SFP_N: media_type = ixgbe_media_type_fiber; break; case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: case IXGBE_DEV_ID_X550EM_A_10G_T: case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: media_type = ixgbe_media_type_copper; break; default: media_type = ixgbe_media_type_unknown; break; } return media_type; } /** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. ** @hw: pointer to hardware structure **/ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) { s32 status; u16 reg; status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_TX_VENDOR_ALARMS_3, MDIO_MMD_PMAPMD, &reg); if (status) return status; /* If PHY FW reset completed bit is set then this is the first * SW instance after a power on so the PHY FW must be un-stalled. */ if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_RES_PR_10, MDIO_MMD_VEND1, &reg); if (status) return status; reg &= ~IXGBE_MDIO_POWER_UP_STALL; status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_RES_PR_10, MDIO_MMD_VEND1, reg); if (status) return status; } return status; } /** * ixgbe_set_mdio_speed - Set MDIO clock speed * @hw: pointer to hardware structure */ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) { u32 hlreg0; switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_X_10G_T: case IXGBE_DEV_ID_X550EM_A_SGMII: case IXGBE_DEV_ID_X550EM_A_SGMII_L: case IXGBE_DEV_ID_X550EM_A_10G_T: case IXGBE_DEV_ID_X550EM_A_SFP: /* Config MDIO clock speed before the first MDIO PHY access */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); hlreg0 &= ~IXGBE_HLREG0_MDCSPD; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); break; case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: /* Select fast MDIO clock speed for these devices */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); hlreg0 |= IXGBE_HLREG0_MDCSPD; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); break; default: break; } } /** ixgbe_reset_hw_X550em - Perform hardware reset ** @hw: pointer to hardware structure ** ** Resets the hardware by resetting the transmit and receive units, masks ** and clears all interrupts, perform a PHY reset, and perform a link (MAC) ** reset. **/ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) { ixgbe_link_speed link_speed; s32 status; u32 ctrl = 0; u32 i; bool link_up = false; u32 swfw_mask = hw->phy.phy_semaphore_mask; /* Call adapter stop to disable Tx/Rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); if (status) return status; /* flush pending Tx transactions */ ixgbe_clear_tx_pending(hw); /* set MDIO speed before talking to the PHY in case it's the 1st time */ ixgbe_set_mdio_speed(hw); /* PHY ops must be identified and initialized prior to reset */ status = hw->phy.ops.init(hw); if (status == IXGBE_ERR_SFP_NOT_SUPPORTED || status == IXGBE_ERR_PHY_ADDR_INVALID) return status; /* start the external PHY */ if (hw->phy.type == ixgbe_phy_x550em_ext_t) { status = ixgbe_init_ext_t_x550em(hw); if (status) return status; } /* Setup SFP module if there is one present. 
*/ if (hw->phy.sfp_setup_needed) { status = hw->mac.ops.setup_sfp(hw); hw->phy.sfp_setup_needed = false; } if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) return status; /* Reset PHY */ if (!hw->phy.reset_disable && hw->phy.ops.reset) hw->phy.ops.reset(hw); mac_reset_top: /* Issue global reset to the MAC. Needs to be SW reset if link is up. * If link reset is used when link is up, it might reset the PHY when * mng is using it. If link is down or the flag to force full link * reset is set, then perform link reset. */ ctrl = IXGBE_CTRL_LNK_RST; if (!hw->force_full_reset) { hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (link_up) ctrl = IXGBE_CTRL_RST; } status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); if (status) { hw_dbg(hw, "semaphore failed with %d", status); return IXGBE_ERR_SWFW_SYNC; } ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); hw->mac.ops.release_swfw_sync(hw, swfw_mask); usleep_range(1000, 1200); /* Poll for reset bit to self-clear meaning reset is complete */ for (i = 0; i < 10; i++) { ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { status = IXGBE_ERR_RESET_FAILED; hw_dbg(hw, "Reset polling failed to complete.\n"); } msleep(50); /* Double resets are required for recovery from certain error * conditions. Between resets, it is necessary to stall to allow * time for any pending HW events to complete. */ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; goto mac_reset_top; } /* Store the permanent mac address */ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); /* Store MAC address from RAR0, clear receive address registers, and * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address. 
*/ hw->mac.num_rar_entries = 128; hw->mac.ops.init_rx_addrs(hw); ixgbe_set_mdio_speed(hw); if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) ixgbe_setup_mux_ctl(hw); return status; } /** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype * anti-spoofing * @hw: pointer to hardware structure * @enable: enable or disable switch for Ethertype anti-spoofing * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing **/ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable, int vf) { int vf_target_reg = vf >> 3; int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT; u32 pfvfspoof; pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) pfvfspoof |= BIT(vf_target_shift); else pfvfspoof &= ~BIT(vf_target_shift); IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } /** ixgbe_set_source_address_pruning_X550 - Enable/Disbale src address pruning * @hw: pointer to hardware structure * @enable: enable or disable source address pruning * @pool: Rx pool to set source address pruning for **/ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable, unsigned int pool) { u64 pfflp; /* max rx pool is 63 */ if (pool > 63) return; pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL); pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32; if (enable) pfflp |= (1ULL << pool); else pfflp &= ~(1ULL << pool); IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp); IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32)); } /** * ixgbe_setup_fc_backplane_x550em_a - Set up flow control * @hw: pointer to hardware structure * * Called at init time to set up flow control. **/ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) { s32 status = 0; u32 an_cntl = 0; /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); return IXGBE_ERR_INVALID_LINK_SETTINGS; } if (hw->fc.requested_mode == ixgbe_fc_default) hw->fc.requested_mode = ixgbe_fc_full; /* Set up the 1G and 10G flow control advertisement registers so the * HW will be able to do FC autoneg once the cable is plugged in. If * we link at 10G, the 1G advertisement is harmless and vice versa. */ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl); if (status) { hw_dbg(hw, "Auto-Negotiation did not complete\n"); return status; } /* The possible values of fc.requested_mode are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames, * but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames but * we do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. * other: Invalid. */ switch (hw->fc.requested_mode) { case ixgbe_fc_none: /* Flow control completely disabled by software override. */ an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); break; case ixgbe_fc_tx_pause: /* Tx Flow control is enabled, and Rx Flow control is * disabled by software override. */ an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; break; case ixgbe_fc_rx_pause: /* Rx Flow control is enabled and Tx Flow control is * disabled by software override. 
Since there really * isn't a way to advertise that we are capable of RX * Pause ONLY, we will advertise that we support both * symmetric and asymmetric Rx PAUSE, as such we fall * through to the fc_full statement. Later, we will * disable the adapter's ability to send PAUSE frames. */ case ixgbe_fc_full: /* Flow control (both Rx and Tx) is enabled by SW override. */ an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; break; default: hw_err(hw, "Flow control param set incorrectly\n"); return IXGBE_ERR_CONFIG; } status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl); /* Restart auto-negotiation. */ status = ixgbe_restart_an_internal_phy_x550em(hw); return status; } /** * ixgbe_set_mux - Set mux for port 1 access with CS4227 * @hw: pointer to hardware structure * @state: set mux if 1, clear if 0 */ static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state) { u32 esdp; if (!hw->bus.lan_id) return; esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); if (state) esdp |= IXGBE_ESDP_SDP1; else esdp &= ~IXGBE_ESDP_SDP1; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to acquire * * Acquires the SWFW semaphore and sets the I2C MUX */ static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) { s32 status; status = ixgbe_acquire_swfw_sync_X540(hw, mask); if (status) return status; if (mask & IXGBE_GSSR_I2C_MASK) ixgbe_set_mux(hw, 1); return 0; } /** * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to release * * Releases the SWFW semaphore and sets the I2C MUX */ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) { if (mask & IXGBE_GSSR_I2C_MASK) ixgbe_set_mux(hw, 0); ixgbe_release_swfw_sync_X540(hw, mask); } /** * ixgbe_acquire_swfw_sync_x550em_a - Acquire SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to acquire * * Acquires the SWFW semaphore and get the shared PHY token as needed */ static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) { u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; int retries = FW_PHY_TOKEN_RETRIES; s32 status; while (--retries) { status = 0; if (hmask) status = ixgbe_acquire_swfw_sync_X540(hw, hmask); if (status) return status; if (!(mask & IXGBE_GSSR_TOKEN_SM)) return 0; status = ixgbe_get_phy_token(hw); if (!status) return 0; if (hmask) ixgbe_release_swfw_sync_X540(hw, hmask); if (status != IXGBE_ERR_TOKEN_RETRY) return status; msleep(FW_PHY_TOKEN_DELAY); } return status; } /** * ixgbe_release_swfw_sync_x550em_a - Release SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to release * * Release the SWFW semaphore and puts the shared PHY token as needed */ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) { u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; if (mask & IXGBE_GSSR_TOKEN_SM) ixgbe_put_phy_token(hw); if (hmask) ixgbe_release_swfw_sync_X540(hw, hmask); } /** * ixgbe_read_phy_reg_x550a - Reads specified PHY register * @hw: pointer to hardware structure * @reg_addr: 32 bit address of PHY register to read * @device_type: 5 bit device type * @phy_data: Pointer to read data from PHY register * * Reads a value from a specified PHY register using the SWFW lock and PHY * Token. 
The PHY Token is needed since the MDIO is shared between to MAC * instances. */ static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; s32 status; if (hw->mac.ops.acquire_swfw_sync(hw, mask)) return IXGBE_ERR_SWFW_SYNC; status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); hw->mac.ops.release_swfw_sync(hw, mask); return status; } /** * ixgbe_write_phy_reg_x550a - Writes specified PHY register * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: 5 bit device type * @phy_data: Data to write to the PHY register * * Writes a value to specified PHY register using the SWFW lock and PHY Token. * The PHY Token is needed since the MDIO is shared between to MAC instances. */ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; s32 status; if (hw->mac.ops.acquire_swfw_sync(hw, mask)) return IXGBE_ERR_SWFW_SYNC; status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data); hw->mac.ops.release_swfw_sync(hw, mask); return status; } #define X550_COMMON_MAC \ .init_hw = &ixgbe_init_hw_generic, \ .start_hw = &ixgbe_start_hw_X540, \ .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, \ .enable_rx_dma = &ixgbe_enable_rx_dma_generic, \ .get_mac_addr = &ixgbe_get_mac_addr_generic, \ .get_device_caps = &ixgbe_get_device_caps_generic, \ .stop_adapter = &ixgbe_stop_adapter_generic, \ .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, \ .read_analog_reg8 = NULL, \ .write_analog_reg8 = NULL, \ .set_rxpba = &ixgbe_set_rxpba_generic, \ .check_link = &ixgbe_check_mac_link_generic, \ .blink_led_start = &ixgbe_blink_led_start_X540, \ .blink_led_stop = &ixgbe_blink_led_stop_X540, \ .set_rar = &ixgbe_set_rar_generic, \ .clear_rar = &ixgbe_clear_rar_generic, \ .set_vmdq = &ixgbe_set_vmdq_generic, \ .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, \ .clear_vmdq = &ixgbe_clear_vmdq_generic, \ .init_rx_addrs = &ixgbe_init_rx_addrs_generic, \ .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, \ .enable_mc = &ixgbe_enable_mc_generic, \ .disable_mc = &ixgbe_disable_mc_generic, \ .clear_vfta = &ixgbe_clear_vfta_generic, \ .set_vfta = &ixgbe_set_vfta_generic, \ .fc_enable = &ixgbe_fc_enable_generic, \ .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_x550, \ .init_uta_tables = &ixgbe_init_uta_tables_generic, \ .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ .set_source_address_pruning = \ &ixgbe_set_source_address_pruning_X550, \ .set_ethertype_anti_spoofing = \ &ixgbe_set_ethertype_anti_spoofing_X550, \ .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \ .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ .get_thermal_sensor_data = NULL, \ .init_thermal_sensor_thresh = NULL, \ .fw_recovery_mode = &ixgbe_fw_recovery_mode_X550, \ .enable_rx = &ixgbe_enable_rx_generic, \ .disable_rx = &ixgbe_disable_rx_x550, \ static const struct ixgbe_mac_operations mac_ops_X550 = { X550_COMMON_MAC .led_on = ixgbe_led_on_generic, .led_off = ixgbe_led_off_generic, .init_led_link_act = ixgbe_init_led_link_act_generic, .reset_hw = &ixgbe_reset_hw_X540, .get_media_type = &ixgbe_get_media_type_X540, .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, .setup_link = &ixgbe_setup_mac_link_X540, .get_link_capabilities = 
&ixgbe_get_copper_link_capabilities_generic, .get_bus_info = &ixgbe_get_bus_info_generic, .setup_sfp = NULL, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, .release_swfw_sync = &ixgbe_release_swfw_sync_X540, .init_swfw_sync = &ixgbe_init_swfw_sync_X540, .prot_autoc_read = prot_autoc_read_generic, .prot_autoc_write = prot_autoc_write_generic, .setup_fc = ixgbe_setup_fc_generic, .fc_autoneg = ixgbe_fc_autoneg, }; static const struct ixgbe_mac_operations mac_ops_X550EM_x = { X550_COMMON_MAC .led_on = ixgbe_led_on_t_x550em, .led_off = ixgbe_led_off_t_x550em, .init_led_link_act = ixgbe_init_led_link_act_generic, .reset_hw = &ixgbe_reset_hw_X550em, .get_media_type = &ixgbe_get_media_type_X550em, .get_san_mac_addr = NULL, .get_wwn_prefix = NULL, .setup_link = &ixgbe_setup_mac_link_X540, .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, .get_bus_info = &ixgbe_get_bus_info_X550em, .setup_sfp = ixgbe_setup_sfp_modules_X550em, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, .init_swfw_sync = &ixgbe_init_swfw_sync_X540, .setup_fc = NULL, /* defined later */ .fc_autoneg = ixgbe_fc_autoneg, .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550, .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, }; static const struct ixgbe_mac_operations mac_ops_X550EM_x_fw = { X550_COMMON_MAC .led_on = NULL, .led_off = NULL, .init_led_link_act = NULL, .reset_hw = &ixgbe_reset_hw_X550em, .get_media_type = &ixgbe_get_media_type_X550em, .get_san_mac_addr = NULL, .get_wwn_prefix = NULL, .setup_link = &ixgbe_setup_mac_link_X540, .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, .get_bus_info = &ixgbe_get_bus_info_X550em, .setup_sfp = ixgbe_setup_sfp_modules_X550em, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, .release_swfw_sync = &ixgbe_release_swfw_sync_X550em, .init_swfw_sync = &ixgbe_init_swfw_sync_X540, .setup_fc = NULL, .fc_autoneg = ixgbe_fc_autoneg, .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550, .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, }; static const struct ixgbe_mac_operations mac_ops_x550em_a = { X550_COMMON_MAC .led_on = ixgbe_led_on_t_x550em, .led_off = ixgbe_led_off_t_x550em, .init_led_link_act = ixgbe_init_led_link_act_generic, .reset_hw = ixgbe_reset_hw_X550em, .get_media_type = ixgbe_get_media_type_X550em, .get_san_mac_addr = NULL, .get_wwn_prefix = NULL, .setup_link = &ixgbe_setup_mac_link_X540, .get_link_capabilities = ixgbe_get_link_capabilities_X550em, .get_bus_info = ixgbe_get_bus_info_X550em, .setup_sfp = ixgbe_setup_sfp_modules_X550em, .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, .setup_fc = ixgbe_setup_fc_x550em, .fc_autoneg = ixgbe_fc_autoneg, .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, }; static const struct ixgbe_mac_operations mac_ops_x550em_a_fw = { X550_COMMON_MAC .led_on = ixgbe_led_on_generic, .led_off = ixgbe_led_off_generic, .init_led_link_act = ixgbe_init_led_link_act_generic, .reset_hw = ixgbe_reset_hw_X550em, .get_media_type = ixgbe_get_media_type_X550em, .get_san_mac_addr = NULL, .get_wwn_prefix = NULL, .setup_link = NULL, /* defined later */ .get_link_capabilities = ixgbe_get_link_capabilities_X550em, .get_bus_info = ixgbe_get_bus_info_X550em, .setup_sfp = ixgbe_setup_sfp_modules_X550em, .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, .setup_fc = ixgbe_setup_fc_x550em, .fc_autoneg = ixgbe_fc_autoneg, 
.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, }; #define X550_COMMON_EEP \ .read = &ixgbe_read_ee_hostif_X550, \ .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \ .write = &ixgbe_write_ee_hostif_X550, \ .write_buffer = &ixgbe_write_ee_hostif_buffer_X550, \ .validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \ .update_checksum = &ixgbe_update_eeprom_checksum_X550, \ .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \ static const struct ixgbe_eeprom_operations eeprom_ops_X550 = { X550_COMMON_EEP .init_params = &ixgbe_init_eeprom_params_X550, }; static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { X550_COMMON_EEP .init_params = &ixgbe_init_eeprom_params_X540, }; #define X550_COMMON_PHY \ .identify_sfp = &ixgbe_identify_module_generic, \ .reset = NULL, \ .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, \ .read_i2c_byte = &ixgbe_read_i2c_byte_generic, \ .write_i2c_byte = &ixgbe_write_i2c_byte_generic, \ .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \ .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ .setup_link = &ixgbe_setup_phy_link_generic, \ .set_phy_power = NULL, static const struct ixgbe_phy_operations phy_ops_X550 = { X550_COMMON_PHY .check_overtemp = &ixgbe_tn_check_overtemp, .init = NULL, .identify = &ixgbe_identify_phy_generic, .read_reg = &ixgbe_read_phy_reg_generic, .write_reg = &ixgbe_write_phy_reg_generic, }; static const struct ixgbe_phy_operations phy_ops_X550EM_x = { X550_COMMON_PHY .check_overtemp = &ixgbe_tn_check_overtemp, .init = &ixgbe_init_phy_ops_X550em, .identify = &ixgbe_identify_phy_x550em, .read_reg = &ixgbe_read_phy_reg_generic, .write_reg = &ixgbe_write_phy_reg_generic, }; static const struct ixgbe_phy_operations phy_ops_x550em_x_fw = { X550_COMMON_PHY .check_overtemp = NULL, .init = ixgbe_init_phy_ops_X550em, .identify = ixgbe_identify_phy_x550em, .read_reg = NULL, .write_reg = NULL, .read_reg_mdi = NULL, .write_reg_mdi = NULL, }; static const struct ixgbe_phy_operations phy_ops_x550em_a = { X550_COMMON_PHY .check_overtemp = &ixgbe_tn_check_overtemp, .init = &ixgbe_init_phy_ops_X550em, .identify = &ixgbe_identify_phy_x550em, .read_reg = &ixgbe_read_phy_reg_x550a, .write_reg = &ixgbe_write_phy_reg_x550a, .read_reg_mdi = &ixgbe_read_phy_reg_mdi, .write_reg_mdi = &ixgbe_write_phy_reg_mdi, }; static const struct ixgbe_phy_operations phy_ops_x550em_a_fw = { X550_COMMON_PHY .check_overtemp = ixgbe_check_overtemp_fw, .init = ixgbe_init_phy_ops_X550em, .identify = ixgbe_identify_phy_fw, .read_reg = NULL, .write_reg = NULL, .read_reg_mdi = NULL, .write_reg_mdi = NULL, }; static const struct ixgbe_link_operations link_ops_x550em_x = { .read_link = &ixgbe_read_i2c_combined_generic, .read_link_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, .write_link = &ixgbe_write_i2c_combined_generic, .write_link_unlocked = &ixgbe_write_i2c_combined_generic_unlocked, }; static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(X550) }; static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(X550EM_x) }; static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(X550EM_a) }; const struct ixgbe_info ixgbe_X550_info = { .mac = ixgbe_mac_X550, .get_invariants = &ixgbe_get_invariants_X540, .mac_ops = &mac_ops_X550, .eeprom_ops = &eeprom_ops_X550, .phy_ops = &phy_ops_X550, .mbx_ops = &mbx_ops_generic, .mvals = ixgbe_mvals_X550, }; const struct ixgbe_info 
ixgbe_X550EM_x_info = { .mac = ixgbe_mac_X550EM_x, .get_invariants = &ixgbe_get_invariants_X550_x, .mac_ops = &mac_ops_X550EM_x, .eeprom_ops = &eeprom_ops_X550EM_x, .phy_ops = &phy_ops_X550EM_x, .mbx_ops = &mbx_ops_generic, .mvals = ixgbe_mvals_X550EM_x, .link_ops = &link_ops_x550em_x, }; const struct ixgbe_info ixgbe_x550em_x_fw_info = { .mac = ixgbe_mac_X550EM_x, .get_invariants = ixgbe_get_invariants_X550_x_fw, .mac_ops = &mac_ops_X550EM_x_fw, .eeprom_ops = &eeprom_ops_X550EM_x, .phy_ops = &phy_ops_x550em_x_fw, .mbx_ops = &mbx_ops_generic, .mvals = ixgbe_mvals_X550EM_x, }; const struct ixgbe_info ixgbe_x550em_a_info = { .mac = ixgbe_mac_x550em_a, .get_invariants = &ixgbe_get_invariants_X550_a, .mac_ops = &mac_ops_x550em_a, .eeprom_ops = &eeprom_ops_X550EM_x, .phy_ops = &phy_ops_x550em_a, .mbx_ops = &mbx_ops_generic, .mvals = ixgbe_mvals_x550em_a, }; const struct ixgbe_info ixgbe_x550em_a_fw_info = { .mac = ixgbe_mac_x550em_a, .get_invariants = ixgbe_get_invariants_X550_a_fw, .mac_ops = &mac_ops_x550em_a_fw, .eeprom_ops = &eeprom_ops_X550EM_x, .phy_ops = &phy_ops_x550em_a_fw, .mbx_ops = &mbx_ops_generic, .mvals = ixgbe_mvals_x550em_a, };
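/*
 * Editor's illustrative sketch (not part of ixgbe_x550.c): the setup_fc
 * routines above (ixgbe_setup_fc_x550em and ixgbe_setup_fc_backplane_x550em_a)
 * translate hw->fc.requested_mode into the SYM_PAUSE/ASM_PAUSE advertisement
 * bits, advertising rx_pause the same way as full because "Rx pause only"
 * cannot be expressed on the wire. The enum, helper, and names below are
 * hypothetical stand-ins compiled as a plain userspace C program; they only
 * demonstrate that mapping and are not driver API.
 */
#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_TX_PAUSE, FC_RX_PAUSE, FC_FULL };

/* Fill in the advertisement bits for a requested mode; false if invalid. */
static bool fc_mode_to_adv(enum fc_mode mode, bool *sym_pause, bool *asm_dir)
{
	switch (mode) {
	case FC_NONE:		/* advertise nothing */
		*sym_pause = false;
		*asm_dir = false;
		return true;
	case FC_TX_PAUSE:	/* can send PAUSE, will not honor it */
		*sym_pause = false;
		*asm_dir = true;
		return true;
	case FC_RX_PAUSE:	/* advertised identically to FC_FULL */
	case FC_FULL:
		*sym_pause = true;
		*asm_dir = true;
		return true;
	default:
		return false;
	}
}

int main(void)
{
	static const char * const names[] = { "none", "tx_pause", "rx_pause", "full" };
	int m;

	for (m = FC_NONE; m <= FC_FULL; m++) {
		bool sym, asm_dir;

		if (fc_mode_to_adv((enum fc_mode)m, &sym, &asm_dir))
			printf("%-8s -> SYM_PAUSE=%d ASM_PAUSE=%d\n",
			       names[m], (int)sym, (int)asm_dir);
	}
	return 0;
}
/* Build with any C compiler, e.g. "cc -o fc_adv fc_adv.c"; the printed table
 * mirrors the switch statements in the driver code above.
 */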
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
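/*
 * Editor's illustrative sketch, closing out the ixgbe_x550.c notes before
 * ixgbe_lib.c begins: ixgbe_acquire_swfw_sync_x550em_a above layers a
 * firmware-owned PHY token on top of the base SWFW semaphore and retries
 * while the token is busy. The stubs, error codes, and retry count below are
 * hypothetical stand-ins in a plain userspace C program; they only show the
 * acquire/release ordering and the bounded retry loop, not the real driver
 * API or timing.
 */
#include <stdio.h>

#define ERR_TOKEN_RETRY	(-1)	/* hypothetical: token busy, try again */
#define ERR_SWFW_SYNC	(-2)	/* hypothetical: semaphore not acquired */

static int acquire_base_semaphore(void)  { return 0; }	/* stub: always ok */
static void release_base_semaphore(void) { }		/* stub */

static int get_phy_token(int attempt)
{
	/* Pretend the token is held by firmware for the first two tries. */
	return attempt < 2 ? ERR_TOKEN_RETRY : 0;
}

static int acquire_swfw_with_token(int max_retries)
{
	int status = ERR_SWFW_SYNC;
	int attempt;

	for (attempt = 0; attempt < max_retries; attempt++) {
		status = acquire_base_semaphore();
		if (status)
			return status;

		status = get_phy_token(attempt);
		if (!status)
			return 0;	/* semaphore and token both held */

		/* Drop the semaphore before retrying so the other port and
		 * firmware are not blocked while we wait for the token.
		 */
		release_base_semaphore();
		if (status != ERR_TOKEN_RETRY)
			return status;
		/* The real driver sleeps (msleep) here before retrying. */
	}
	return status;
}

int main(void)
{
	printf("acquire status: %d\n", acquire_swfw_with_token(5));
	return 0;
}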
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "ixgbe.h" #include "ixgbe_sriov.h" #ifdef CONFIG_IXGBE_DCB /** * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV * @adapter: board private structure to initialize * * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It * will also try to cache the proper offsets if RSS/FCoE are enabled along * with VMDq. * **/ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) { #ifdef IXGBE_FCOE struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; #endif /* IXGBE_FCOE */ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; int i; u16 reg_idx, pool; u8 tcs = adapter->hw_tcs; /* verify we have DCB queueing enabled before proceeding */ if (tcs <= 1) return false; /* verify we have VMDq enabled before proceeding */ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return false; /* start at VMDq register offset for SR-IOV enabled setups */ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) { /* If we are greater than indices move to next pool */ if ((reg_idx & ~vmdq->mask) >= tcs) { pool++; reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); } adapter->rx_ring[i]->reg_idx = reg_idx; adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev; } reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { /* If we are greater than indices move to next pool */ if ((reg_idx & ~vmdq->mask) >= tcs) reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); adapter->tx_ring[i]->reg_idx = reg_idx; } #ifdef IXGBE_FCOE /* nothing to do if FCoE is disabled */ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) return true; /* The work is already done if the FCoE ring is shared */ if (fcoe->offset < tcs) return true; /* The FCoE rings exist separately, we need to move their reg_idx */ if (fcoe->indices) { u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter); reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; for (i = fcoe->offset; i < adapter->num_rx_queues; i++) { reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; adapter->rx_ring[i]->reg_idx = reg_idx; adapter->rx_ring[i]->netdev = adapter->netdev; reg_idx++; } reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; for (i = fcoe->offset; i < adapter->num_tx_queues; i++) { reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; adapter->tx_ring[i]->reg_idx = reg_idx; reg_idx++; } } #endif /* IXGBE_FCOE */ return true; } /* ixgbe_get_first_reg_idx - Return first register index associated with ring */ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, unsigned int *tx, unsigned int *rx) { struct ixgbe_hw *hw = &adapter->hw; u8 num_tcs = adapter->hw_tcs; *tx = 0; *rx = 0; switch (hw->mac.type) { case ixgbe_mac_82598EB: /* TxQs/TC: 4 RxQs/TC: 8 */ *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */ *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */ break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: if (num_tcs > 4) { /* * TCs : TC0/1 TC2/3 TC4-7 * TxQs/TC: 32 16 8 * RxQs/TC: 16 16 16 */ *rx = tc << 4; if (tc < 3) *tx = tc << 5; /* 0, 32, 64 */ else if (tc < 5) *tx = (tc + 2) << 4; /* 80, 96 */ else *tx = (tc + 8) << 3; /* 104, 112, 120 */ } else { /* * TCs : TC0 TC1 TC2/3 * TxQs/TC: 64 32 16 * RxQs/TC: 32 32 32 */ *rx = tc << 5; if (tc 
< 2) *tx = tc << 6; /* 0, 64 */ else *tx = (tc + 4) << 4; /* 96, 112 */ } break; default: break; } } /** * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB * @adapter: board private structure to initialize * * Cache the descriptor ring offsets for DCB to the assigned rings. * **/ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) { u8 num_tcs = adapter->hw_tcs; unsigned int tx_idx, rx_idx; int tc, offset, rss_i, i; /* verify we have DCB queueing enabled before proceeding */ if (num_tcs <= 1) return false; rss_i = adapter->ring_feature[RING_F_RSS].indices; for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx); for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { adapter->tx_ring[offset + i]->reg_idx = tx_idx; adapter->rx_ring[offset + i]->reg_idx = rx_idx; adapter->rx_ring[offset + i]->netdev = adapter->netdev; adapter->tx_ring[offset + i]->dcb_tc = tc; adapter->rx_ring[offset + i]->dcb_tc = tc; } } return true; } #endif /** * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov * @adapter: board private structure to initialize * * SR-IOV doesn't use any descriptor rings but changes the default if * no other mapping is used. * */ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) { #ifdef IXGBE_FCOE struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; #endif /* IXGBE_FCOE */ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; u16 reg_idx, pool; int i; /* only proceed if VMDq is enabled */ if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) return false; /* start at VMDq register offset for SR-IOV enabled setups */ pool = 0; reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { #ifdef IXGBE_FCOE /* Allow first FCoE queue to be mapped as RSS */ if (fcoe->offset && (i > fcoe->offset)) break; #endif /* If we are greater than indices move to next pool */ if ((reg_idx & ~vmdq->mask) >= rss->indices) { pool++; reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); } adapter->rx_ring[i]->reg_idx = reg_idx; adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev; } #ifdef IXGBE_FCOE /* FCoE uses a linear block of queues so just assigning 1:1 */ for (; i < adapter->num_rx_queues; i++, reg_idx++) { adapter->rx_ring[i]->reg_idx = reg_idx; adapter->rx_ring[i]->netdev = adapter->netdev; } #endif reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { #ifdef IXGBE_FCOE /* Allow first FCoE queue to be mapped as RSS */ if (fcoe->offset && (i > fcoe->offset)) break; #endif /* If we are greater than indices move to next pool */ if ((reg_idx & rss->mask) >= rss->indices) reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); adapter->tx_ring[i]->reg_idx = reg_idx; } #ifdef IXGBE_FCOE /* FCoE uses a linear block of queues so just assigning 1:1 */ for (; i < adapter->num_tx_queues; i++, reg_idx++) adapter->tx_ring[i]->reg_idx = reg_idx; #endif return true; } /** * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS * @adapter: board private structure to initialize * * Cache the descriptor ring offsets for RSS to the assigned rings. 
* **/ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) { int i, reg_idx; for (i = 0; i < adapter->num_rx_queues; i++) { adapter->rx_ring[i]->reg_idx = i; adapter->rx_ring[i]->netdev = adapter->netdev; } for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++) adapter->tx_ring[i]->reg_idx = reg_idx; for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++) adapter->xdp_ring[i]->reg_idx = reg_idx; return true; } /** * ixgbe_cache_ring_register - Descriptor ring to register mapping * @adapter: board private structure to initialize * * Once we know the feature-set enabled for the device, we'll cache * the register offset the descriptor ring is assigned to. * * Note, the order of the various feature calls is important. It must start with * the "most" features enabled at the same time, then trickle down to the * least amount of features turned on at once. **/ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) { /* start with default case */ adapter->rx_ring[0]->reg_idx = 0; adapter->tx_ring[0]->reg_idx = 0; #ifdef CONFIG_IXGBE_DCB if (ixgbe_cache_ring_dcb_sriov(adapter)) return; if (ixgbe_cache_ring_dcb(adapter)) return; #endif if (ixgbe_cache_ring_sriov(adapter)) return; ixgbe_cache_ring_rss(adapter); } static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter) { int queues; queues = min_t(int, IXGBE_MAX_XDP_QS, nr_cpu_ids); return adapter->xdp_prog ? queues : 0; } #define IXGBE_RSS_64Q_MASK 0x3F #define IXGBE_RSS_16Q_MASK 0xF #define IXGBE_RSS_8Q_MASK 0x7 #define IXGBE_RSS_4Q_MASK 0x3 #define IXGBE_RSS_2Q_MASK 0x1 #define IXGBE_RSS_DISABLED_MASK 0x0 #ifdef CONFIG_IXGBE_DCB /** * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB * @adapter: board private structure to initialize * * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues * and VM pools where appropriate. Also assign queues based on DCB * priorities and map accordingly. 
* **/ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) { int i; u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; u16 vmdq_m = 0; #ifdef IXGBE_FCOE u16 fcoe_i = 0; #endif u8 tcs = adapter->hw_tcs; /* verify we have DCB queueing enabled before proceeding */ if (tcs <= 1) return false; /* verify we have VMDq enabled before proceeding */ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return false; /* limit VMDq instances on the PF by number of Tx queues */ vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs); /* Add starting offset to total pool count */ vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; /* 16 pools w/ 8 TC per pool */ if (tcs > 4) { vmdq_i = min_t(u16, vmdq_i, 16); vmdq_m = IXGBE_82599_VMDQ_8Q_MASK; /* 32 pools w/ 4 TC per pool */ } else { vmdq_i = min_t(u16, vmdq_i, 32); vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; } #ifdef IXGBE_FCOE /* queues in the remaining pools are available for FCoE */ fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; #endif /* remove the starting offset from the pool count */ vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; /* save features for later use */ adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; /* * We do not support DCB, VMDq, and RSS all simultaneously * so we will disable RSS since it is the lowest priority */ adapter->ring_feature[RING_F_RSS].indices = 1; adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK; /* disable ATR as it is not supported when VMDq is enabled */ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->num_rx_pools = vmdq_i; adapter->num_rx_queues_per_pool = tcs; adapter->num_tx_queues = vmdq_i * tcs; adapter->num_xdp_queues = 0; adapter->num_rx_queues = vmdq_i * tcs; #ifdef IXGBE_FCOE if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { struct ixgbe_ring_feature *fcoe; fcoe = &adapter->ring_feature[RING_F_FCOE]; /* limit ourselves based on feature limits */ fcoe_i = min_t(u16, fcoe_i, fcoe->limit); if (fcoe_i) { /* alloc queues for FCoE separately */ fcoe->indices = fcoe_i; fcoe->offset = vmdq_i * tcs; /* add queues to adapter */ adapter->num_tx_queues += fcoe_i; adapter->num_rx_queues += fcoe_i; } else if (tcs > 1) { /* use queue belonging to FcoE TC */ fcoe->indices = 1; fcoe->offset = ixgbe_fcoe_get_tc(adapter); } else { adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; fcoe->indices = 0; fcoe->offset = 0; } } #endif /* IXGBE_FCOE */ /* configure TC to queue mapping */ for (i = 0; i < tcs; i++) netdev_set_tc_queue(adapter->netdev, i, 1, i); return true; } static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) { struct net_device *dev = adapter->netdev; struct ixgbe_ring_feature *f; int rss_i, rss_m, i; int tcs; /* Map queue offset and counts onto allocated tx queues */ tcs = adapter->hw_tcs; /* verify we have DCB queueing enabled before proceeding */ if (tcs <= 1) return false; /* determine the upper limit for our current DCB mode */ rss_i = dev->num_tx_queues / tcs; if (adapter->hw.mac.type == ixgbe_mac_82598EB) { /* 8 TC w/ 4 queues per TC */ rss_i = min_t(u16, rss_i, 4); rss_m = IXGBE_RSS_4Q_MASK; } else if (tcs > 4) { /* 8 TC w/ 8 queues per TC */ rss_i = min_t(u16, rss_i, 8); rss_m = IXGBE_RSS_8Q_MASK; } else { /* 4 TC w/ 16 queues per TC */ rss_i = min_t(u16, rss_i, 16); rss_m = IXGBE_RSS_16Q_MASK; } /* set RSS mask and indices */ f = &adapter->ring_feature[RING_F_RSS]; rss_i = min_t(int, rss_i, f->limit); f->indices = rss_i; f->mask = rss_m; /* disable ATR as it is not supported when multiple TCs are enabled */ 
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; #ifdef IXGBE_FCOE /* FCoE enabled queues require special configuration indexed * by feature specific indices and offset. Here we map FCoE * indices onto the DCB queue pairs allowing FCoE to own * configuration later. */ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { u8 tc = ixgbe_fcoe_get_tc(adapter); f = &adapter->ring_feature[RING_F_FCOE]; f->indices = min_t(u16, rss_i, f->limit); f->offset = rss_i * tc; } #endif /* IXGBE_FCOE */ for (i = 0; i < tcs; i++) netdev_set_tc_queue(dev, i, rss_i, rss_i * i); adapter->num_tx_queues = rss_i * tcs; adapter->num_xdp_queues = 0; adapter->num_rx_queues = rss_i * tcs; return true; } #endif /** * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices * @adapter: board private structure to initialize * * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues * and VM pools where appropriate. If RSS is available, then also try and * enable RSS and map accordingly. * **/ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) { u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; u16 vmdq_m = 0; u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; u16 rss_m = IXGBE_RSS_DISABLED_MASK; #ifdef IXGBE_FCOE u16 fcoe_i = 0; #endif /* only proceed if SR-IOV is enabled */ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return false; /* limit l2fwd RSS based on total Tx queue limit */ rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i); /* Add starting offset to total pool count */ vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; /* double check we are limited to maximum pools */ vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); /* 64 pool mode with 2 queues per pool */ if (vmdq_i > 32) { vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; rss_m = IXGBE_RSS_2Q_MASK; rss_i = min_t(u16, rss_i, 2); /* 32 pool mode with up to 4 queues per pool */ } else { vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; rss_m = IXGBE_RSS_4Q_MASK; /* We can support 4, 2, or 1 queues */ rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1; } #ifdef IXGBE_FCOE /* queues in the remaining pools are available for FCoE */ fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); #endif /* remove the starting offset from the pool count */ vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; /* save features for later use */ adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; /* limit RSS based on user input and save for later use */ adapter->ring_feature[RING_F_RSS].indices = rss_i; adapter->ring_feature[RING_F_RSS].mask = rss_m; adapter->num_rx_pools = vmdq_i; adapter->num_rx_queues_per_pool = rss_i; adapter->num_rx_queues = vmdq_i * rss_i; adapter->num_tx_queues = vmdq_i * rss_i; adapter->num_xdp_queues = 0; /* disable ATR as it is not supported when VMDq is enabled */ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; #ifdef IXGBE_FCOE /* * FCoE can use rings from adjacent buffers to allow RSS * like behavior. To account for this we need to add the * FCoE indices to the total ring count. 
*/ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { struct ixgbe_ring_feature *fcoe; fcoe = &adapter->ring_feature[RING_F_FCOE]; /* limit ourselves based on feature limits */ fcoe_i = min_t(u16, fcoe_i, fcoe->limit); if (vmdq_i > 1 && fcoe_i) { /* alloc queues for FCoE separately */ fcoe->indices = fcoe_i; fcoe->offset = vmdq_i * rss_i; } else { /* merge FCoE queues with RSS queues */ fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus()); /* limit indices to rss_i if MSI-X is disabled */ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) fcoe_i = rss_i; /* attempt to reserve some queues for just FCoE */ fcoe->indices = min_t(u16, fcoe_i, fcoe->limit); fcoe->offset = fcoe_i - fcoe->indices; fcoe_i -= rss_i; } /* add queues to adapter */ adapter->num_tx_queues += fcoe_i; adapter->num_rx_queues += fcoe_i; } #endif /* To support macvlan offload we have to use num_tc to * restrict the queues that can be used by the device. * By doing this we can avoid reporting a false number of * queues. */ if (vmdq_i > 1) netdev_set_num_tc(adapter->netdev, 1); /* populate TC0 for use by pool 0 */ netdev_set_tc_queue(adapter->netdev, 0, adapter->num_rx_queues_per_pool, 0); return true; } /** * ixgbe_set_rss_queues - Allocate queues for RSS * @adapter: board private structure to initialize * * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. * **/ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_ring_feature *f; u16 rss_i; /* set mask for 16 queue limit of RSS */ f = &adapter->ring_feature[RING_F_RSS]; rss_i = f->limit; f->indices = rss_i; if (hw->mac.type < ixgbe_mac_X550) f->mask = IXGBE_RSS_16Q_MASK; else f->mask = IXGBE_RSS_64Q_MASK; /* disable ATR by default, it will be configured below */ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; /* * Use Flow Director in addition to RSS to ensure the best * distribution of flows across cores, even when an FDIR flow * isn't matched. */ if (rss_i > 1 && adapter->atr_sample_rate) { f = &adapter->ring_feature[RING_F_FDIR]; rss_i = f->indices = f->limit; if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; } #ifdef IXGBE_FCOE /* * FCoE can exist on the same rings as standard network traffic * however it is preferred to avoid that if possible. In order * to get the best performance we allocate as many FCoE queues * as we can and we place them at the end of the ring array to * avoid sharing queues with standard RSS on systems with 24 or * more CPUs. */ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { struct net_device *dev = adapter->netdev; u16 fcoe_i; f = &adapter->ring_feature[RING_F_FCOE]; /* merge FCoE queues with RSS queues */ fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus()); fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues); /* limit indices to rss_i if MSI-X is disabled */ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) fcoe_i = rss_i; /* attempt to reserve some queues for just FCoE */ f->indices = min_t(u16, fcoe_i, f->limit); f->offset = fcoe_i - f->indices; rss_i = max_t(u16, fcoe_i, rss_i); } #endif /* IXGBE_FCOE */ adapter->num_rx_queues = rss_i; adapter->num_tx_queues = rss_i; adapter->num_xdp_queues = ixgbe_xdp_queues(adapter); return true; } /** * ixgbe_set_num_queues - Allocate queues for device, feature dependent * @adapter: board private structure to initialize * * This is the top level queue allocation routine. 
The order here is very * important, starting with the "most" number of features turned on at once, * and ending with the smallest set of features. This way large combinations * can be allocated if they're turned on, and smaller combinations are the * fallthrough conditions. * **/ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) { /* Start with base case */ adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; adapter->num_xdp_queues = 0; adapter->num_rx_pools = 1; adapter->num_rx_queues_per_pool = 1; #ifdef CONFIG_IXGBE_DCB if (ixgbe_set_dcb_sriov_queues(adapter)) return; if (ixgbe_set_dcb_queues(adapter)) return; #endif if (ixgbe_set_sriov_queues(adapter)) return; ixgbe_set_rss_queues(adapter); } /** * ixgbe_acquire_msix_vectors - acquire MSI-X vectors * @adapter: board private structure * * Attempts to acquire a suitable range of MSI-X vector interrupts. Will * return a negative error code if unable to acquire MSI-X vectors for any * reason. */ static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int i, vectors, vector_threshold; /* We start by asking for one vector per queue pair with XDP queues * being stacked with TX queues. */ vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); vectors = max(vectors, adapter->num_xdp_queues); /* It is easy to be greedy for MSI-X vectors. However, it really * doesn't do much good if we have a lot more vectors than CPUs. We'll * be somewhat conservative and only ask for (roughly) the same number * of vectors as there are CPUs. */ vectors = min_t(int, vectors, num_online_cpus()); /* Some vectors are necessary for non-queue interrupts */ vectors += NON_Q_VECTORS; /* Hardware can only support a maximum of hw.mac->max_msix_vectors. * With features such as RSS and VMDq, we can easily surpass the * number of Rx and Tx descriptor queues supported by our device. * Thus, we cap the maximum in the rare cases where the CPU count also * exceeds our vector limit */ vectors = min_t(int, vectors, hw->mac.max_msix_vectors); /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0] * handler, and (2) an Other (Link Status Change, etc.) handler. */ vector_threshold = MIN_MSIX_COUNT; adapter->msix_entries = kcalloc(vectors, sizeof(struct msix_entry), GFP_KERNEL); if (!adapter->msix_entries) return -ENOMEM; for (i = 0; i < vectors; i++) adapter->msix_entries[i].entry = i; vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, vector_threshold, vectors); if (vectors < 0) { /* A negative count of allocated vectors indicates an error in * acquiring within the specified range of MSI-X vectors */ e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", vectors); adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; kfree(adapter->msix_entries); adapter->msix_entries = NULL; return vectors; } /* we successfully allocated some number of vectors within our * requested range. */ adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Adjust for only the vectors we'll use, which is minimum * of max_q_vectors, or the number of vectors we were allocated. 
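 *
 * Illustrative worked example, not in the original source: with 12 Rx and
 * 12 Tx queues on a machine with 8 online CPUs, the request above works
 * out to min(max(12, 12), 8) + NON_Q_VECTORS = 9 vectors (NON_Q_VECTORS
 * is assumed to be 1 here).  If the PCI core grants all 9, the adjustment
 * below gives num_q_vectors = min(9 - 1, max_q_vectors) = 8, assuming
 * max_q_vectors is at least 8.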
*/ vectors -= NON_Q_VECTORS; adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors); return 0; } static void ixgbe_add_ring(struct ixgbe_ring *ring, struct ixgbe_ring_container *head) { ring->next = head->ring; head->ring = ring; head->count++; head->next_update = jiffies + 1; } /** * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector * @adapter: board private structure to initialize * @v_count: q_vectors allocated on adapter, used for ring interleaving * @v_idx: index of vector in adapter struct * @txr_count: total number of Tx rings to allocate * @txr_idx: index of first Tx ring to allocate * @xdp_count: total number of XDP rings to allocate * @xdp_idx: index of first XDP ring to allocate * @rxr_count: total number of Rx rings to allocate * @rxr_idx: index of first Rx ring to allocate * * We allocate one q_vector. If allocation fails we return -ENOMEM. **/ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_count, int v_idx, int txr_count, int txr_idx, int xdp_count, int xdp_idx, int rxr_count, int rxr_idx) { int node = dev_to_node(&adapter->pdev->dev); struct ixgbe_q_vector *q_vector; struct ixgbe_ring *ring; int cpu = -1; int ring_count; u8 tcs = adapter->hw_tcs; ring_count = txr_count + rxr_count + xdp_count; /* customize cpu for Flow Director mapping */ if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; if (rss_i > 1 && adapter->atr_sample_rate) { cpu = cpumask_local_spread(v_idx, node); node = cpu_to_node(cpu); } } /* allocate q_vector and rings */ q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count), GFP_KERNEL, node); if (!q_vector) q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL); if (!q_vector) return -ENOMEM; /* setup affinity mask and node */ if (cpu != -1) cpumask_set_cpu(cpu, &q_vector->affinity_mask); q_vector->numa_node = node; #ifdef CONFIG_IXGBE_DCA /* initialize CPU for DCA */ q_vector->cpu = -1; #endif /* initialize NAPI */ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll); /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; q_vector->adapter = adapter; q_vector->v_idx = v_idx; /* initialize work limits */ q_vector->tx.work_limit = adapter->tx_work_limit; /* Initialize setting for adaptive ITR */ q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS | IXGBE_ITR_ADAPTIVE_LATENCY; q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS | IXGBE_ITR_ADAPTIVE_LATENCY; /* intialize ITR */ if (txr_count && !rxr_count) { /* tx only vector */ if (adapter->tx_itr_setting == 1) q_vector->itr = IXGBE_12K_ITR; else q_vector->itr = adapter->tx_itr_setting; } else { /* rx or rx/tx vector */ if (adapter->rx_itr_setting == 1) q_vector->itr = IXGBE_20K_ITR; else q_vector->itr = adapter->rx_itr_setting; } /* initialize pointer to rings */ ring = q_vector->ring; while (txr_count) { /* assign generic ring traits */ ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; /* configure backlink on ring */ ring->q_vector = q_vector; /* update q_vector Tx values */ ixgbe_add_ring(ring, &q_vector->tx); /* apply Tx specific ring traits */ ring->count = adapter->tx_ring_count; ring->queue_index = txr_idx; /* assign ring to adapter */ WRITE_ONCE(adapter->tx_ring[txr_idx], ring); /* update count and index */ txr_count--; txr_idx += v_count; /* push pointer to next ring */ ring++; } while (xdp_count) { /* assign generic ring traits */ ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; /* configure backlink 
on ring */ ring->q_vector = q_vector; /* update q_vector Tx values */ ixgbe_add_ring(ring, &q_vector->tx); /* apply Tx specific ring traits */ ring->count = adapter->tx_ring_count; ring->queue_index = xdp_idx; set_ring_xdp(ring); spin_lock_init(&ring->tx_lock); /* assign ring to adapter */ WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring); /* update count and index */ xdp_count--; xdp_idx++; /* push pointer to next ring */ ring++; } while (rxr_count) { /* assign generic ring traits */ ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; /* configure backlink on ring */ ring->q_vector = q_vector; /* update q_vector Rx values */ ixgbe_add_ring(ring, &q_vector->rx); /* * 82599 errata, UDP frames with a 0 checksum * can be marked as checksum errors. */ if (adapter->hw.mac.type == ixgbe_mac_82599EB) set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); #ifdef IXGBE_FCOE if (adapter->netdev->features & NETIF_F_FCOE_MTU) { struct ixgbe_ring_feature *f; f = &adapter->ring_feature[RING_F_FCOE]; if ((rxr_idx >= f->offset) && (rxr_idx < f->offset + f->indices)) set_bit(__IXGBE_RX_FCOE, &ring->state); } #endif /* IXGBE_FCOE */ /* apply Rx specific ring traits */ ring->count = adapter->rx_ring_count; ring->queue_index = rxr_idx; /* assign ring to adapter */ WRITE_ONCE(adapter->rx_ring[rxr_idx], ring); /* update count and index */ rxr_count--; rxr_idx += v_count; /* push pointer to next ring */ ring++; } return 0; } /** * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector * @adapter: board private structure to initialize * @v_idx: Index of vector to be freed * * This function frees the memory allocated to the q_vector. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector. **/ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) { struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; struct ixgbe_ring *ring; ixgbe_for_each_ring(ring, q_vector->tx) { if (ring_is_xdp(ring)) WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL); else WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL); } ixgbe_for_each_ring(ring, q_vector->rx) WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL); adapter->q_vector[v_idx] = NULL; __netif_napi_del(&q_vector->napi); /* * after a call to __netif_napi_del() napi may still be used and * ixgbe_get_stats64() might access the rings on this vector, * we must wait a grace period before freeing it. */ kfree_rcu(q_vector, rcu); } /** * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors * @adapter: board private structure to initialize * * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM. **/ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) { int q_vectors = adapter->num_q_vectors; int rxr_remaining = adapter->num_rx_queues; int txr_remaining = adapter->num_tx_queues; int xdp_remaining = adapter->num_xdp_queues; int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0; int err, i; /* only one q_vector if MSI-X is disabled. 
*/ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) q_vectors = 1; if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) { for (; rxr_remaining; v_idx++) { err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, 0, 0, 0, 0, 1, rxr_idx); if (err) goto err_out; /* update counts and index */ rxr_remaining--; rxr_idx++; } } for (; v_idx < q_vectors; v_idx++) { int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx); err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, tqpv, txr_idx, xqpv, xdp_idx, rqpv, rxr_idx); if (err) goto err_out; /* update counts and index */ rxr_remaining -= rqpv; txr_remaining -= tqpv; xdp_remaining -= xqpv; rxr_idx++; txr_idx++; xdp_idx += xqpv; } for (i = 0; i < adapter->num_rx_queues; i++) { if (adapter->rx_ring[i]) adapter->rx_ring[i]->ring_idx = i; } for (i = 0; i < adapter->num_tx_queues; i++) { if (adapter->tx_ring[i]) adapter->tx_ring[i]->ring_idx = i; } for (i = 0; i < adapter->num_xdp_queues; i++) { if (adapter->xdp_ring[i]) adapter->xdp_ring[i]->ring_idx = i; } return 0; err_out: adapter->num_tx_queues = 0; adapter->num_xdp_queues = 0; adapter->num_rx_queues = 0; adapter->num_q_vectors = 0; while (v_idx--) ixgbe_free_q_vector(adapter, v_idx); return -ENOMEM; } /** * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors * @adapter: board private structure to initialize * * This function frees the memory allocated to the q_vectors. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector. **/ static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) { int v_idx = adapter->num_q_vectors; adapter->num_tx_queues = 0; adapter->num_xdp_queues = 0; adapter->num_rx_queues = 0; adapter->num_q_vectors = 0; while (v_idx--) ixgbe_free_q_vector(adapter, v_idx); } static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) { if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; pci_disable_msix(adapter->pdev); kfree(adapter->msix_entries); adapter->msix_entries = NULL; } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; pci_disable_msi(adapter->pdev); } } /** * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported * @adapter: board private structure to initialize * * Attempt to configure the interrupts using the best available * capabilities of the hardware and the kernel. **/ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) { int err; /* We will try to get MSI-X interrupts first */ if (!ixgbe_acquire_msix_vectors(adapter)) return; /* At this point, we do not have MSI-X capabilities. We need to * reconfigure or disable various features which require MSI-X * capability. */ /* Disable DCB unless we only have a single traffic class */ if (adapter->hw_tcs > 1) { e_dev_warn("Number of DCB TCs exceeds number of available queues. 
Disabling DCB support.\n"); netdev_reset_tc(adapter->netdev); if (adapter->hw.mac.type == ixgbe_mac_82598EB) adapter->hw.fc.requested_mode = adapter->last_lfc_mode; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; adapter->temp_dcb_cfg.pfc_mode_enable = false; adapter->dcb_cfg.pfc_mode_enable = false; } adapter->hw_tcs = 0; adapter->dcb_cfg.num_tcs.pg_tcs = 1; adapter->dcb_cfg.num_tcs.pfc_tcs = 1; /* Disable SR-IOV support */ e_dev_warn("Disabling SR-IOV support\n"); ixgbe_disable_sriov(adapter); /* Disable RSS */ e_dev_warn("Disabling RSS support\n"); adapter->ring_feature[RING_F_RSS].limit = 1; /* recalculate number of queues now that many features have been * changed or disabled. */ ixgbe_set_num_queues(adapter); adapter->num_q_vectors = 1; err = pci_enable_msi(adapter->pdev); if (err) e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n", err); else adapter->flags |= IXGBE_FLAG_MSI_ENABLED; } /** * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme * @adapter: board private structure to initialize * * We determine which interrupt scheme to use based on... * - Kernel support (MSI, MSI-X) * - which can be user-defined (via MODULE_PARAM) * - Hardware queue count (num_*_queues) * - defined by miscellaneous hardware support/features (RSS, etc.) **/ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) { int err; /* Number of supported queues */ ixgbe_set_num_queues(adapter); /* Set interrupt mode */ ixgbe_set_interrupt_capability(adapter); err = ixgbe_alloc_q_vectors(adapter); if (err) { e_dev_err("Unable to allocate memory for queue vectors\n"); goto err_alloc_q_vectors; } ixgbe_cache_ring_register(adapter); e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count = %u\n", (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", adapter->num_rx_queues, adapter->num_tx_queues, adapter->num_xdp_queues); set_bit(__IXGBE_DOWN, &adapter->state); return 0; err_alloc_q_vectors: ixgbe_reset_interrupt_capability(adapter); return err; } /** * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings * @adapter: board private structure to clear interrupt scheme on * * We go through and clear interrupt specific resources and reset the structure * to pre-load conditions **/ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) { adapter->num_tx_queues = 0; adapter->num_xdp_queues = 0; adapter->num_rx_queues = 0; ixgbe_free_q_vectors(adapter); ixgbe_reset_interrupt_capability(adapter); } void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx) { struct ixgbe_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; context_desc = IXGBE_TX_CTXTDESC(tx_ring, i); i++; tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; /* set bits to identify this as an advanced context descriptor */ type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); context_desc->fceof_saidx = cpu_to_le32(fceof_saidx); context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); }
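/*
 * Editor's sketch, not part of the driver: a standalone user-space
 * illustration of how ixgbe_alloc_q_vectors() above spreads the remaining
 * rings over the q_vectors.  Each vector takes DIV_ROUND_UP(remaining,
 * vectors_left) rings, which keeps the per-vector ring counts within one
 * of each other.  DIV_ROUND_UP is redefined locally to mirror the kernel
 * macro; the ring/vector counts in main() are made-up example values.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static void distribute_rings(int rings, int vectors)
{
	int remaining = rings;
	int v_idx;

	for (v_idx = 0; v_idx < vectors; v_idx++) {
		/* same formula the driver uses for rqpv/tqpv/xqpv */
		int per_vector = DIV_ROUND_UP(remaining, vectors - v_idx);

		printf("q_vector %d gets %d ring(s)\n", v_idx, per_vector);
		remaining -= per_vector;
	}
}

int main(void)
{
	/* e.g. 10 Rx rings over 4 q_vectors -> 3, 3, 2, 2 */
	distribute_rings(10, 4);
	return 0;
}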
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
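/*
 * Editor's sketch, not part of the driver: models how the VF multicast
 * handlers below (ixgbe_set_vf_multicasts / ixgbe_restore_vf_multicasts)
 * program the 4096-bit multicast table array.  A hash value is split into
 * a register index (upper 7 bits of the low 12) and a bit position (low
 * 5 bits).  The table is modeled as a plain shadow array here; the real
 * code reads and writes the IXGBE_MTA registers through the register
 * access macros.
 */
#include <stdint.h>
#include <stdio.h>

#define MTA_REGS	128	/* 128 x 32 bits = 4096 hash buckets */

static uint32_t mta_shadow[MTA_REGS];

static void mta_set_hash(uint16_t hash)
{
	uint32_t vector_reg = (hash >> 5) & 0x7F;	/* which 32-bit register */
	uint32_t vector_bit = hash & 0x1F;		/* which bit within it */

	mta_shadow[vector_reg] |= 1u << vector_bit;
}

int main(void)
{
	/* hash 0x0A3 lands in register 5, bit 3 */
	mta_set_hash(0x0A3);
	printf("MTA[5] = 0x%08x\n", mta_shadow[5]);
	return 0;
}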
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/types.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/string.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/ipv6.h> #include <linux/if_bridge.h> #ifdef NETIF_F_HW_VLAN_CTAG_TX #include <linux/if_vlan.h> #endif #include "ixgbe.h" #include "ixgbe_type.h" #include "ixgbe_sriov.h" #ifdef CONFIG_PCI_IOV static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, unsigned int num_vfs) { struct ixgbe_hw *hw = &adapter->hw; struct vf_macvlans *mv_list; int num_vf_macvlans, i; num_vf_macvlans = hw->mac.num_rar_entries - (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs); if (!num_vf_macvlans) return; mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans), GFP_KERNEL); if (mv_list) { /* Initialize list of VF macvlans */ INIT_LIST_HEAD(&adapter->vf_mvs.l); for (i = 0; i < num_vf_macvlans; i++) { mv_list[i].vf = -1; mv_list[i].free = true; list_add(&mv_list[i].l, &adapter->vf_mvs.l); } adapter->mv_list = mv_list; } } static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int num_vfs) { struct ixgbe_hw *hw = &adapter->hw; int i; if (adapter->xdp_prog) { e_warn(probe, "SRIOV is not supported with XDP\n"); return -EINVAL; } /* Enable VMDq flag so device will be set in VM mode */ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_VMDQ_ENABLED; /* Allocate memory for per VF control structures */ adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), GFP_KERNEL); if (!adapter->vfinfo) return -ENOMEM; adapter->num_vfs = num_vfs; ixgbe_alloc_vf_macvlans(adapter, num_vfs); adapter->ring_feature[RING_F_VMDQ].offset = num_vfs; /* Initialize default switching mode VEB */ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); adapter->bridge_mode = BRIDGE_MODE_VEB; /* limit traffic classes based on VFs enabled */ if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) { adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; } else if (num_vfs < 32) { adapter->dcb_cfg.num_tcs.pg_tcs = 4; adapter->dcb_cfg.num_tcs.pfc_tcs = 4; } else { adapter->dcb_cfg.num_tcs.pg_tcs = 1; adapter->dcb_cfg.num_tcs.pfc_tcs = 1; } /* Disable RSC when in SR-IOV mode */ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | IXGBE_FLAG2_RSC_ENABLED); for (i = 0; i < num_vfs; i++) { /* enable spoof checking for all VFs */ adapter->vfinfo[i].spoofchk_enabled = true; adapter->vfinfo[i].link_enable = true; /* We support VF RSS querying only for 82599 and x540 * devices at the moment. These devices share RSS * indirection table and RSS hash key with PF therefore * we want to disable the querying by default. 
*/ adapter->vfinfo[i].rss_query_enabled = false; /* Untrust all VFs */ adapter->vfinfo[i].trusted = false; /* set the default xcast mode */ adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE; } e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs); return 0; } /** * ixgbe_get_vfs - Find and take references to all vf devices * @adapter: Pointer to adapter struct */ static void ixgbe_get_vfs(struct ixgbe_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; u16 vendor = pdev->vendor; struct pci_dev *vfdev; int vf = 0; u16 vf_id; int pos; pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); if (!pos) return; pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); vfdev = pci_get_device(vendor, vf_id, NULL); for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) { if (!vfdev->is_virtfn) continue; if (vfdev->physfn != pdev) continue; if (vf >= adapter->num_vfs) continue; pci_dev_get(vfdev); adapter->vfinfo[vf].vfdev = vfdev; ++vf; } } /* Note this function is called when the user wants to enable SR-IOV * VFs using the now deprecated module parameter */ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs) { int pre_existing_vfs = 0; unsigned int num_vfs; pre_existing_vfs = pci_num_vf(adapter->pdev); if (!pre_existing_vfs && !max_vfs) return; /* If there are pre-existing VFs then we have to force * use of that many - over ride any module parameter value. * This may result from the user unloading the PF driver * while VFs were assigned to guest VMs or because the VFs * have been created via the new PCI SR-IOV sysfs interface. */ if (pre_existing_vfs) { num_vfs = pre_existing_vfs; dev_warn(&adapter->pdev->dev, "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n"); } else { int err; /* * The 82599 supports up to 64 VFs per physical function * but this implementation limits allocation to 63 so that * basic networking resources are still available to the * physical function. If the user requests greater than * 63 VFs then it is an error - reset to default of zero. */ num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT); err = pci_enable_sriov(adapter->pdev, num_vfs); if (err) { e_err(probe, "Failed to enable PCI sriov: %d\n", err); return; } } if (!__ixgbe_enable_sriov(adapter, num_vfs)) { ixgbe_get_vfs(adapter); return; } /* If we have gotten to this point then there is no memory available * to manage the VF devices - print message and bail. 
*/ e_err(probe, "Unable to allocate memory for VF Data Storage - " "SRIOV disabled\n"); ixgbe_disable_sriov(adapter); } #endif /* #ifdef CONFIG_PCI_IOV */ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { unsigned int num_vfs = adapter->num_vfs, vf; unsigned long flags; int rss; spin_lock_irqsave(&adapter->vfs_lock, flags); /* set num VFs to 0 to prevent access to vfinfo */ adapter->num_vfs = 0; spin_unlock_irqrestore(&adapter->vfs_lock, flags); /* put the reference to all of the vf devices */ for (vf = 0; vf < num_vfs; ++vf) { struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; if (!vfdev) continue; adapter->vfinfo[vf].vfdev = NULL; pci_dev_put(vfdev); } /* free VF control structures */ kfree(adapter->vfinfo); adapter->vfinfo = NULL; /* free macvlan list */ kfree(adapter->mv_list); adapter->mv_list = NULL; /* if SR-IOV is already disabled then there is nothing to do */ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return 0; #ifdef CONFIG_PCI_IOV /* * If our VFs are assigned we cannot shut down SR-IOV * without causing issues, so just leave the hardware * available but disabled */ if (pci_vfs_assigned(adapter->pdev)) { e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); return -EPERM; } /* disable iov and allow time for transactions to clear */ pci_disable_sriov(adapter->pdev); #endif /* Disable VMDq flag so device will be set in VM mode */ if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) { adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); } else { rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus()); } adapter->ring_feature[RING_F_VMDQ].offset = 0; adapter->ring_feature[RING_F_RSS].limit = rss; /* take a breather then clean up driver data */ msleep(100); return 0; } static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) { #ifdef CONFIG_PCI_IOV struct ixgbe_adapter *adapter = pci_get_drvdata(dev); int pre_existing_vfs = pci_num_vf(dev); int err = 0, num_rx_pools, i, limit; u8 num_tc; if (pre_existing_vfs && pre_existing_vfs != num_vfs) err = ixgbe_disable_sriov(adapter); else if (pre_existing_vfs && pre_existing_vfs == num_vfs) return num_vfs; if (err) return err; /* While the SR-IOV capability structure reports total VFs to be 64, * we limit the actual number allocated as below based on two factors. * Num_TCs MAX_VFs * 1 63 * <=4 31 * >4 15 * First, we reserve some transmit/receive resources for the PF. * Second, VMDQ also uses the same pools that SR-IOV does. We need to * account for this, so that we don't accidentally allocate more VFs * than we have available pools. The PCI bus driver already checks for * other values out of range. */ num_tc = adapter->hw_tcs; num_rx_pools = bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools); limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC : (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC; if (num_vfs > (limit - num_rx_pools)) { e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. 
Creating more than %d VFs is not allowed\n", num_tc, num_rx_pools - 1, limit - num_rx_pools); return -EPERM; } err = __ixgbe_enable_sriov(adapter, num_vfs); if (err) return err; for (i = 0; i < num_vfs; i++) ixgbe_vf_configuration(dev, (i | 0x10000000)); /* reset before enabling SRIOV to avoid mailbox issues */ ixgbe_sriov_reinit(adapter); err = pci_enable_sriov(dev, num_vfs); if (err) { e_dev_warn("Failed to enable PCI sriov: %d\n", err); return err; } ixgbe_get_vfs(adapter); return num_vfs; #else return 0; #endif } static int ixgbe_pci_sriov_disable(struct pci_dev *dev) { struct ixgbe_adapter *adapter = pci_get_drvdata(dev); int err; #ifdef CONFIG_PCI_IOV u32 current_flags = adapter->flags; int prev_num_vf = pci_num_vf(dev); #endif err = ixgbe_disable_sriov(adapter); /* Only reinit if no error and state changed */ #ifdef CONFIG_PCI_IOV if (!err && (current_flags != adapter->flags || prev_num_vf != pci_num_vf(dev))) ixgbe_sriov_reinit(adapter); #endif return err; } int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs) { if (num_vfs == 0) return ixgbe_pci_sriov_disable(dev); else return ixgbe_pci_sriov_enable(dev, num_vfs); } static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; u16 *hash_list = (u16 *)&msgbuf[1]; struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; struct ixgbe_hw *hw = &adapter->hw; int i; u32 vector_bit; u32 vector_reg; u32 mta_reg; u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); /* only so many hash values supported */ entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); /* * salt away the number of multi cast addresses assigned * to this VF for later use to restore when the PF multi cast * list changes */ vfinfo->num_vf_mc_hashes = entries; /* * VFs are limited to using the MTA hash table for their multicast * addresses */ for (i = 0; i < entries; i++) { vfinfo->vf_mc_hashes[i] = hash_list[i]; } for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); mta_reg |= BIT(vector_bit); IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); } vmolr |= IXGBE_VMOLR_ROMPE; IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); return 0; } #ifdef CONFIG_PCI_IOV void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct vf_data_storage *vfinfo; int i, j; u32 vector_bit; u32 vector_reg; u32 mta_reg; for (i = 0; i < adapter->num_vfs; i++) { u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i)); vfinfo = &adapter->vfinfo[i]; for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { hw->addr_ctrl.mta_in_use++; vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); mta_reg |= BIT(vector_bit); IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); } if (vfinfo->num_vf_mc_hashes) vmolr |= IXGBE_VMOLR_ROMPE; else vmolr &= ~IXGBE_VMOLR_ROMPE; IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr); } /* Restore any VF macvlans */ ixgbe_full_sync_mac_table(adapter); } #endif static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; int err; /* If VLAN overlaps with one the PF is currently monitoring make * sure that we are able to allocate a VLVF entry. This may be * redundant but it guarantees PF will maintain visibility to * the VLAN. 
*/ if (add && test_bit(vid, adapter->active_vlans)) { err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false); if (err) return err; } err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false); if (add && !err) return err; /* If we failed to add the VF VLAN or we are removing the VF VLAN * we may need to drop the PF pool bit in order to allow us to free * up the VLVF resources. */ if (test_bit(vid, adapter->active_vlans) || (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) ixgbe_update_pf_promisc_vlvf(adapter, vid); return err; } static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; u32 max_frs; if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) { e_err(drv, "VF max_frame %d out of range\n", max_frame); return -EINVAL; } /* * For 82599EB we have to keep all PFs and VFs operating with * the same max_frame value in order to avoid sending an oversize * frame to a VF. In order to guarantee this is handled correctly * for all cases we have several special exceptions to take into * account before we can enable the VF for receive */ if (adapter->hw.mac.type == ixgbe_mac_82599EB) { struct net_device *dev = adapter->netdev; int pf_max_frame = dev->mtu + ETH_HLEN; u32 reg_offset, vf_shift, vfre; s32 err = 0; #ifdef CONFIG_FCOE if (dev->features & NETIF_F_FCOE_MTU) pf_max_frame = max_t(int, pf_max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); #endif /* CONFIG_FCOE */ switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: /* Version 1.1 supports jumbo frames on VFs if PF has * jumbo frames enabled which means legacy VFs are * disabled */ if (pf_max_frame > ETH_FRAME_LEN) break; fallthrough; default: /* If the PF or VF are running w/ jumbo frames enabled * we need to shut down the VF Rx path as we cannot * support jumbo frames on legacy VFs */ if ((pf_max_frame > ETH_FRAME_LEN) || (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) err = -EINVAL; break; } /* determine VF receive enable location */ vf_shift = vf % 32; reg_offset = vf / 32; /* enable or disable receive depending on error */ vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); if (err) vfre &= ~BIT(vf_shift); else vfre |= BIT(vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre); if (err) { e_err(drv, "VF max_frame %d out of range\n", max_frame); return err; } } /* pull current max frame size from hardware */ max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); max_frs &= IXGBE_MHADD_MFS_MASK; max_frs >>= IXGBE_MHADD_MFS_SHIFT; if (max_frs < max_frame) { max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs); } e_info(hw, "VF requests change max MTU to %d\n", max_frame); return 0; } static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) { u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); vmolr |= IXGBE_VMOLR_BAM; if (aupe) vmolr |= IXGBE_VMOLR_AUPE; else vmolr &= ~IXGBE_VMOLR_AUPE; IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); } static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); } static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; u32 vlvfb_mask, pool_mask, i; /* create mask for VF and other pools */ pool_mask = ~BIT(VMDQ_P(0) % 32); vlvfb_mask = BIT(vf % 32); /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */ for (i = IXGBE_VLVF_ENTRIES; i--;) { u32 bits[2], vlvfb, vid, vfta, vlvf; u32 word = i * 2 
+ vf / 32; u32 mask; vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); /* if our bit isn't set we can skip it */ if (!(vlvfb & vlvfb_mask)) continue; /* clear our bit from vlvfb */ vlvfb ^= vlvfb_mask; /* create 64b mask to chedk to see if we should clear VLVF */ bits[word % 2] = vlvfb; bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1)); /* if other pools are present, just remove ourselves */ if (bits[(VMDQ_P(0) / 32) ^ 1] || (bits[VMDQ_P(0) / 32] & pool_mask)) goto update_vlvfb; /* if PF is present, leave VFTA */ if (bits[0] || bits[1]) goto update_vlvf; /* if we cannot determine VLAN just remove ourselves */ vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); if (!vlvf) goto update_vlvfb; vid = vlvf & VLAN_VID_MASK; mask = BIT(vid % 32); /* clear bit from VFTA */ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32)); if (vfta & mask) IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask); update_vlvf: /* clear POOL selection enable */ IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0); if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) vlvfb = 0; update_vlvfb: /* clear pool bits */ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb); } } static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, int vf, int index, unsigned char *mac_addr) { struct vf_macvlans *entry; struct list_head *pos; int retval = 0; if (index <= 1) { list_for_each(pos, &adapter->vf_mvs.l) { entry = list_entry(pos, struct vf_macvlans, l); if (entry->vf == vf) { entry->vf = -1; entry->free = true; entry->is_macvlan = false; ixgbe_del_mac_filter(adapter, entry->vf_macvlan, vf); } } } /* * If index was zero then we were asked to clear the uc list * for the VF. We're done. */ if (!index) return 0; entry = NULL; list_for_each(pos, &adapter->vf_mvs.l) { entry = list_entry(pos, struct vf_macvlans, l); if (entry->free) break; } /* * If we traversed the entire list and didn't find a free entry * then we're out of space on the RAR table. Also entry may * be NULL because the original memory allocation for the list * failed, which is not fatal but does mean we can't support * VF requests for MACVLAN because we couldn't allocate * memory for the list management required. 
*/ if (!entry || !entry->free) return -ENOSPC; retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); if (retval < 0) return retval; entry->free = false; entry->is_macvlan = true; entry->vf = vf; memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); return 0; } static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); u8 num_tcs = adapter->hw_tcs; u32 reg_val; u32 queue; /* remove VLAN filters beloning to this VF */ ixgbe_clear_vf_vlans(adapter, vf); /* add back PF assigned VLAN or VLAN 0 */ ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); /* reset offloads to defaults */ ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); /* set outgoing tags for VFs */ if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { ixgbe_clear_vmvir(adapter, vf); } else { if (vfinfo->pf_qos || !num_tcs) ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, vfinfo->pf_qos, vf); else ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, adapter->default_up, vf); if (vfinfo->spoofchk_enabled) { hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); hw->mac.ops.set_mac_anti_spoofing(hw, true, vf); } } /* reset multicast table array for vf */ adapter->vfinfo[vf].num_vf_mc_hashes = 0; /* clear any ipsec table info */ ixgbe_ipsec_vf_clear(adapter, vf); /* Flush and reset the mta with the new values */ ixgbe_set_rx_mode(adapter->netdev); ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); ixgbe_set_vf_macvlan(adapter, vf, 0, NULL); /* reset VF api back to unknown */ adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; /* Restart each queue for given VF */ for (queue = 0; queue < q_per_pool; queue++) { unsigned int reg_idx = (vf * q_per_pool) + queue; reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx)); /* Re-enabling only configured queues */ if (reg_val) { reg_val |= IXGBE_TXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val); reg_val &= ~IXGBE_TXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val); } } IXGBE_WRITE_FLUSH(hw); } static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; u32 word; /* Clear VF's mailbox memory */ for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0); IXGBE_WRITE_FLUSH(hw); } static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, int vf, unsigned char *mac_addr) { s32 retval; ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); if (retval >= 0) memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); else eth_zero_addr(adapter->vfinfo[vf].vf_mac_addresses); return retval; } int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); unsigned int vfn = (event_mask & 0x3f); bool enable = ((event_mask & 0x10000000U) != 0); if (enable) eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses); return 0; } static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf, u32 qde) { struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); int i; for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) { u32 reg; /* flush previous write */ IXGBE_WRITE_FLUSH(hw); /* indicate to hardware that we want to set 
drop enable */ reg = IXGBE_QDE_WRITE | qde; reg |= i << IXGBE_QDE_IDX_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); } } /** * ixgbe_set_vf_rx_tx - Set VF rx tx * @adapter: Pointer to adapter struct * @vf: VF identifier * * Set or reset correct transmit and receive for vf **/ static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf) { u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx; struct ixgbe_hw *hw = &adapter->hw; u32 reg_offset, vf_shift; vf_shift = vf % 32; reg_offset = vf / 32; reg_cur_tx = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); reg_cur_rx = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); if (adapter->vfinfo[vf].link_enable) { reg_req_tx = reg_cur_tx | 1 << vf_shift; reg_req_rx = reg_cur_rx | 1 << vf_shift; } else { reg_req_tx = reg_cur_tx & ~(1 << vf_shift); reg_req_rx = reg_cur_rx & ~(1 << vf_shift); } /* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. * For more info take a look at ixgbe_set_vf_lpe */ if (adapter->hw.mac.type == ixgbe_mac_82599EB) { struct net_device *dev = adapter->netdev; int pf_max_frame = dev->mtu + ETH_HLEN; #if IS_ENABLED(CONFIG_FCOE) if (dev->features & NETIF_F_FCOE_MTU) pf_max_frame = max_t(int, pf_max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); #endif /* CONFIG_FCOE */ if (pf_max_frame > ETH_FRAME_LEN) reg_req_rx = reg_cur_rx & ~(1 << vf_shift); } /* Enable/Disable particular VF */ if (reg_cur_tx != reg_req_tx) IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg_req_tx); if (reg_cur_rx != reg_req_rx) IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg_req_rx); } static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; struct ixgbe_hw *hw = &adapter->hw; unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; u32 reg, reg_offset, vf_shift; u32 msgbuf[4] = {0, 0, 0, 0}; u8 *addr = (u8 *)(&msgbuf[1]); u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); int i; e_info(probe, "VF Reset msg received from vf %d\n", vf); /* reset the filters for the device */ ixgbe_vf_reset_event(adapter, vf); ixgbe_vf_clear_mbx(adapter, vf); /* set vf mac address */ if (!is_zero_ether_addr(vf_mac)) ixgbe_set_vf_mac(adapter, vf, vf_mac); vf_shift = vf % 32; reg_offset = vf / 32; /* force drop enable for all VF Rx queues */ reg = IXGBE_QDE_ENABLE; if (adapter->vfinfo[vf].pf_vlan) reg |= IXGBE_QDE_HIDE_VLAN; ixgbe_write_qde(adapter, vf, reg); ixgbe_set_vf_rx_tx(adapter, vf); /* enable VF mailbox for further messages */ adapter->vfinfo[vf].clear_to_send = true; /* Enable counting of spoofed packets in the SSVPC register */ reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); reg |= BIT(vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); /* * Reset the VFs TDWBAL and TDWBAH registers * which are not cleared by an FLR */ for (i = 0; i < q_per_pool; i++) { IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0); IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0); } /* reply to reset with ack and vf mac address */ msgbuf[0] = IXGBE_VF_RESET; if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) { msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; memcpy(addr, vf_mac, ETH_ALEN); } else { msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; } /* * Piggyback the multicast filter type so VF can compute the * correct vectors */ msgbuf[3] = hw->mac.mc_filter_type; ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); return 0; } static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { u8 *new_mac = ((u8 *)(&msgbuf[1])); if (!is_valid_ether_addr(new_mac)) { 
e_warn(drv, "VF %d attempted to set invalid mac\n", vf); return -1; } if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) { e_warn(drv, "VF %d attempted to override administratively set MAC address\n" "Reload the VF driver to resume operations\n", vf); return -1; } return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; } static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); u8 tcs = adapter->hw_tcs; if (adapter->vfinfo[vf].pf_vlan || tcs) { e_warn(drv, "VF %d attempted to override administratively set VLAN configuration\n" "Reload the VF driver to resume operations\n", vf); return -1; } /* VLAN 0 is a special case, don't allow it to be removed */ if (!vid && !add) return 0; return ixgbe_set_vf_vlan(adapter, add, vid, vf); } static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { u8 *new_mac = ((u8 *)(&msgbuf[1])); int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; int err; if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && index > 0) { e_warn(drv, "VF %d requested MACVLAN filter but is administratively denied\n", vf); return -1; } /* An non-zero index indicates the VF is setting a filter */ if (index) { if (!is_valid_ether_addr(new_mac)) { e_warn(drv, "VF %d attempted to set invalid mac\n", vf); return -1; } /* * If the VF is allowed to set MAC filters then turn off * anti-spoofing to avoid false positives. */ if (adapter->vfinfo[vf].spoofchk_enabled) { struct ixgbe_hw *hw = &adapter->hw; hw->mac.ops.set_mac_anti_spoofing(hw, false, vf); hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); } } err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac); if (err == -ENOSPC) e_warn(drv, "VF %d has requested a MACVLAN filter but there is no space for it\n", vf); return err < 0; } static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { int api = msgbuf[1]; switch (api) { case ixgbe_mbox_api_10: case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: adapter->vfinfo[vf].vf_api = api; return 0; default: break; } e_info(drv, "VF %d requested invalid api version %u\n", vf, api); return -1; } static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { struct net_device *dev = adapter->netdev; struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; unsigned int default_tc = 0; u8 num_tcs = adapter->hw_tcs; /* verify the PF is supporting the correct APIs */ switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_20: case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: break; default: return -1; } /* only allow 1 Tx queue for bandwidth limiting */ msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); /* if TCs > 1 determine which TC belongs to default user priority */ if (num_tcs > 1) default_tc = netdev_get_prio_tc_map(dev, adapter->default_up); /* notify VF of need for VLAN tag stripping, and correct queue */ if (num_tcs) msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs; else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos) msgbuf[IXGBE_VF_TRANS_VLAN] = 1; else msgbuf[IXGBE_VF_TRANS_VLAN] = 0; /* notify VF of default queue */ msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc; return 0; } 
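/*
 * Editor's sketch, not part of the driver: the __ALIGN_MASK(1, ~mask)
 * idiom used in ixgbe_get_vf_queues() above (and in several SR-IOV
 * helpers) recovers the number of queues per VMDq pool from the pool
 * mask.  The macro is redefined locally to mirror the kernel definition;
 * the example masks are chosen with their low 1, 2 and 3 bits clear,
 * which is the pattern the IXGBE_82599_VMDQ_*Q_MASK constants follow.
 */
#include <stdio.h>

#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))

int main(void)
{
	unsigned int masks[] = { 0x7E, 0x7C, 0x78 };	/* 2-, 4-, 8-queue pools */
	unsigned int i;

	for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++)
		printf("mask 0x%02x -> %u queues per pool\n",
		       masks[i], __ALIGN_MASK(1u, ~masks[i]));
	return 0;
}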
static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { u32 i, j; u32 *out_buf = &msgbuf[1]; const u8 *reta = adapter->rss_indir_tbl; u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter); /* Check if operation is permitted */ if (!adapter->vfinfo[vf].rss_query_enabled) return -EPERM; /* verify the PF is supporting the correct API */ switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: break; default: return -EOPNOTSUPP; } /* This mailbox command is supported (required) only for 82599 and x540 * VFs which support up to 4 RSS queues. Therefore we will compress the * RETA by saving only 2 bits from each entry. This way we will be able * to transfer the whole RETA in a single mailbox operation. */ for (i = 0; i < reta_size / 16; i++) { out_buf[i] = 0; for (j = 0; j < 16; j++) out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j); } return 0; } static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { u32 *rss_key = &msgbuf[1]; /* Check if the operation is permitted */ if (!adapter->vfinfo[vf].rss_query_enabled) return -EPERM; /* verify the PF is supporting the correct API */ switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: break; default: return -EOPNOTSUPP; } memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE); return 0; } static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; int xcast_mode = msgbuf[1]; u32 vmolr, fctrl, disable, enable; /* verify the PF is supporting the correct APIs */ switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_12: /* promisc introduced in 1.3 version */ if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) return -EOPNOTSUPP; fallthrough; case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: break; default: return -EOPNOTSUPP; } if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI && !adapter->vfinfo[vf].trusted) { xcast_mode = IXGBEVF_XCAST_MODE_MULTI; } if (adapter->vfinfo[vf].xcast_mode == xcast_mode) goto out; switch (xcast_mode) { case IXGBEVF_XCAST_MODE_NONE: disable = IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM; break; case IXGBEVF_XCAST_MODE_MULTI: disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE; break; case IXGBEVF_XCAST_MODE_ALLMULTI: disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; break; case IXGBEVF_XCAST_MODE_PROMISC: if (hw->mac.type <= ixgbe_mac_82599EB) return -EOPNOTSUPP; fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); if (!(fctrl & IXGBE_FCTRL_UPE)) { /* VF promisc requires PF in promisc */ e_warn(drv, "Enabling VF promisc requires PF in promisc\n"); return -EPERM; } disable = IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE; break; default: return -EOPNOTSUPP; } vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); vmolr &= ~disable; vmolr |= enable; IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); adapter->vfinfo[vf].xcast_mode = xcast_mode; out: msgbuf[1] = xcast_mode; return 0; } static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { u32 *link_state = &msgbuf[1]; /* verify the PF is supporting the correct API */ switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: break; default: return -EOPNOTSUPP; } *link_state = 
adapter->vfinfo[vf].link_enable; return 0; } static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) { u32 mbx_size = IXGBE_VFMAILBOX_SIZE; u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; struct ixgbe_hw *hw = &adapter->hw; s32 retval; retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); if (retval) { pr_err("Error receiving message from VF\n"); return retval; } /* this is a message we already processed, do nothing */ if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) return 0; /* flush the ack before we write any messages back */ IXGBE_WRITE_FLUSH(hw); if (msgbuf[0] == IXGBE_VF_RESET) return ixgbe_vf_reset_msg(adapter, vf); /* * until the vf completes a virtual function reset it should not be * allowed to start any configuration. */ if (!adapter->vfinfo[vf].clear_to_send) { msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; ixgbe_write_mbx(hw, msgbuf, 1, vf); return 0; } switch ((msgbuf[0] & 0xFFFF)) { case IXGBE_VF_SET_MAC_ADDR: retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf); break; case IXGBE_VF_SET_MULTICAST: retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf); break; case IXGBE_VF_SET_VLAN: retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf); break; case IXGBE_VF_SET_LPE: retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf); break; case IXGBE_VF_SET_MACVLAN: retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf); break; case IXGBE_VF_API_NEGOTIATE: retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf); break; case IXGBE_VF_GET_QUEUES: retval = ixgbe_get_vf_queues(adapter, msgbuf, vf); break; case IXGBE_VF_GET_RETA: retval = ixgbe_get_vf_reta(adapter, msgbuf, vf); break; case IXGBE_VF_GET_RSS_KEY: retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); break; case IXGBE_VF_UPDATE_XCAST_MODE: retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); break; case IXGBE_VF_GET_LINK_STATE: retval = ixgbe_get_vf_link_state(adapter, msgbuf, vf); break; case IXGBE_VF_IPSEC_ADD: retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf); break; case IXGBE_VF_IPSEC_DEL: retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf); break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); retval = IXGBE_ERR_MBX; break; } /* notify the VF of the results of what it sent us */ if (retval) msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; else msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; ixgbe_write_mbx(hw, msgbuf, mbx_size, vf); return retval; } static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; u32 msg = IXGBE_VT_MSGTYPE_NACK; /* if device isn't clear to send it shouldn't be reading either */ if (!adapter->vfinfo[vf].clear_to_send) ixgbe_write_mbx(hw, &msg, 1, vf); } void ixgbe_msg_task(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; unsigned long flags; u32 vf; spin_lock_irqsave(&adapter->vfs_lock, flags); for (vf = 0; vf < adapter->num_vfs; vf++) { /* process any reset requests */ if (!ixgbe_check_for_rst(hw, vf)) ixgbe_vf_reset_event(adapter, vf); /* process any messages pending */ if (!ixgbe_check_for_msg(hw, vf)) ixgbe_rcv_msg_from_vf(adapter, vf); /* process any acks */ if (!ixgbe_check_for_ack(hw, vf)) ixgbe_rcv_ack_from_vf(adapter, vf); } spin_unlock_irqrestore(&adapter->vfs_lock, flags); } static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) { struct ixgbe_hw *hw = &adapter->hw; u32 ping; ping = IXGBE_PF_CONTROL_MSG; if (adapter->vfinfo[vf].clear_to_send) ping |= IXGBE_VT_MSGTYPE_CTS; ixgbe_write_mbx(hw, &ping, 1, vf); } void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) { struct ixgbe_hw 
*hw = &adapter->hw; u32 ping; int i; for (i = 0 ; i < adapter->num_vfs; i++) { ping = IXGBE_PF_CONTROL_MSG; if (adapter->vfinfo[i].clear_to_send) ping |= IXGBE_VT_MSGTYPE_CTS; ixgbe_write_mbx(hw, &ping, 1, i); } } /** * ixgbe_set_all_vfs - update vfs queues * @adapter: Pointer to adapter struct * * Update setting transmit and receive queues for all vfs **/ void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter) { int i; for (i = 0 ; i < adapter->num_vfs; i++) ixgbe_set_vf_link_state(adapter, i, adapter->vfinfo[i].link_state); } int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct ixgbe_adapter *adapter = netdev_priv(netdev); s32 retval; if (vf >= adapter->num_vfs) return -EINVAL; if (is_valid_ether_addr(mac)) { dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective."); retval = ixgbe_set_vf_mac(adapter, vf, mac); if (retval >= 0) { adapter->vfinfo[vf].pf_set_mac = true; if (test_bit(__IXGBE_DOWN, &adapter->state)) { dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n"); dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n"); } } else { dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n"); } } else if (is_zero_ether_addr(mac)) { unsigned char *vf_mac_addr = adapter->vfinfo[vf].vf_mac_addresses; /* nothing to do */ if (is_zero_ether_addr(vf_mac_addr)) return 0; dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf); retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf); if (retval >= 0) { adapter->vfinfo[vf].pf_set_mac = false; memcpy(vf_mac_addr, mac, ETH_ALEN); } else { dev_warn(&adapter->pdev->dev, "Could NOT remove the VF MAC address.\n"); } } else { retval = -EINVAL; } return retval; } static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, u16 vlan, u8 qos) { struct ixgbe_hw *hw = &adapter->hw; int err; err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); if (err) goto out; /* Revoke tagless access via VLAN 0 */ ixgbe_set_vf_vlan(adapter, false, 0, vf); ixgbe_set_vmvir(adapter, vlan, qos, vf); ixgbe_set_vmolr(hw, vf, false); /* enable hide vlan on X550 */ if (hw->mac.type >= ixgbe_mac_X550) ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE | IXGBE_QDE_HIDE_VLAN); adapter->vfinfo[vf].pf_vlan = vlan; adapter->vfinfo[vf].pf_qos = qos; dev_info(&adapter->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); if (test_bit(__IXGBE_DOWN, &adapter->state)) { dev_warn(&adapter->pdev->dev, "The VF VLAN has been set, but the PF device is not up.\n"); dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n"); } out: return err; } static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) { struct ixgbe_hw *hw = &adapter->hw; int err; err = ixgbe_set_vf_vlan(adapter, false, adapter->vfinfo[vf].pf_vlan, vf); /* Restore tagless access via VLAN 0 */ ixgbe_set_vf_vlan(adapter, true, 0, vf); ixgbe_clear_vmvir(adapter, vf); ixgbe_set_vmolr(hw, vf, true); /* disable hide VLAN on X550 */ if (hw->mac.type >= ixgbe_mac_X550) ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); adapter->vfinfo[vf].pf_vlan = 0; adapter->vfinfo[vf].pf_qos = 0; return err; } int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) { int err = 0; struct ixgbe_adapter *adapter = netdev_priv(netdev); if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 
7)) return -EINVAL; if (vlan_proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT; if (vlan || qos) { /* Check if there is already a port VLAN set, if so * we have to delete the old one first before we * can set the new one. The usage model had * previously assumed the user would delete the * old port VLAN before setting a new one but this * is not necessarily the case. */ if (adapter->vfinfo[vf].pf_vlan) err = ixgbe_disable_port_vlan(adapter, vf); if (err) goto out; err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos); } else { err = ixgbe_disable_port_vlan(adapter, vf); } out: return err; } int ixgbe_link_mbps(struct ixgbe_adapter *adapter) { switch (adapter->link_speed) { case IXGBE_LINK_SPEED_100_FULL: return 100; case IXGBE_LINK_SPEED_1GB_FULL: return 1000; case IXGBE_LINK_SPEED_10GB_FULL: return 10000; default: return 0; } } static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf) { struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; struct ixgbe_hw *hw = &adapter->hw; u32 bcnrc_val = 0; u16 queue, queues_per_pool; u16 tx_rate = adapter->vfinfo[vf].tx_rate; if (tx_rate) { /* start with base link speed value */ bcnrc_val = adapter->vf_rate_link_speed; /* Calculate the rate factor values to set */ bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; bcnrc_val /= tx_rate; /* clear everything but the rate factor */ bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | IXGBE_RTTBCNRC_RF_DEC_MASK; /* enable the rate scheduler */ bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; } /* * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported * and 0x004 otherwise. */ switch (hw->mac.type) { case ixgbe_mac_82599EB: IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4); break; case ixgbe_mac_X540: IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14); break; default: break; } /* determine how many queues per pool based on VMDq mask */ queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); /* write value for all Tx queues belonging to VF */ for (queue = 0; queue < queues_per_pool; queue++) { unsigned int reg_idx = (vf * queues_per_pool) + queue; IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx); IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); } } void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) { int i; /* VF Tx rate limit was not set */ if (!adapter->vf_rate_link_speed) return; if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) { adapter->vf_rate_link_speed = 0; dev_info(&adapter->pdev->dev, "Link speed has been changed. 
VF Transmit rate is disabled\n"); } for (i = 0; i < adapter->num_vfs; i++) { if (!adapter->vf_rate_link_speed) adapter->vfinfo[i].tx_rate = 0; ixgbe_set_vf_rate_limit(adapter, i); } } int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int link_speed; /* verify VF is active */ if (vf >= adapter->num_vfs) return -EINVAL; /* verify link is up */ if (!adapter->link_up) return -EINVAL; /* verify we are linked at 10Gbps */ link_speed = ixgbe_link_mbps(adapter); if (link_speed != 10000) return -EINVAL; if (min_tx_rate) return -EINVAL; /* rate limit cannot be less than 10Mbs or greater than link speed */ if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed))) return -EINVAL; /* store values */ adapter->vf_rate_link_speed = link_speed; adapter->vfinfo[vf].tx_rate = max_tx_rate; /* update hardware configuration */ ixgbe_set_vf_rate_limit(adapter, vf); return 0; } int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; if (vf >= adapter->num_vfs) return -EINVAL; adapter->vfinfo[vf].spoofchk_enabled = setting; /* configure MAC spoofing */ hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf); /* configure VLAN spoofing */ hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf); /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be * calling set_ethertype_anti_spoofing for each VF in loop below */ if (hw->mac.ops.set_ethertype_anti_spoofing) { IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), (IXGBE_ETQF_FILTER_EN | IXGBE_ETQF_TX_ANTISPOOF | ETH_P_LLDP)); IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), (IXGBE_ETQF_FILTER_EN | IXGBE_ETQF_TX_ANTISPOOF | ETH_P_PAUSE)); hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf); } return 0; } /** * ixgbe_set_vf_link_state - Set link state * @adapter: Pointer to adapter struct * @vf: VF identifier * @state: required link state * * Set a link force state on/off a single vf **/ void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state) { adapter->vfinfo[vf].link_state = state; switch (state) { case IFLA_VF_LINK_STATE_AUTO: if (test_bit(__IXGBE_DOWN, &adapter->state)) adapter->vfinfo[vf].link_enable = false; else adapter->vfinfo[vf].link_enable = true; break; case IFLA_VF_LINK_STATE_ENABLE: adapter->vfinfo[vf].link_enable = true; break; case IFLA_VF_LINK_STATE_DISABLE: adapter->vfinfo[vf].link_enable = false; break; } ixgbe_set_vf_rx_tx(adapter, vf); /* restart the VF */ adapter->vfinfo[vf].clear_to_send = false; ixgbe_ping_vf(adapter, vf); } /** * ixgbe_ndo_set_vf_link_state - Set link state * @netdev: network interface device structure * @vf: VF identifier * @state: required link state * * Set the link state of a specified VF, regardless of physical link state **/ int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int ret = 0; if (vf < 0 || vf >= adapter->num_vfs) { dev_err(&adapter->pdev->dev, "NDO set VF link - invalid VF identifier %d\n", vf); return -EINVAL; } switch (state) { case IFLA_VF_LINK_STATE_ENABLE: dev_info(&adapter->pdev->dev, "NDO set VF %d link state %d - not supported\n", vf, state); break; case IFLA_VF_LINK_STATE_DISABLE: dev_info(&adapter->pdev->dev, "NDO set VF %d link state disable\n", vf); ixgbe_set_vf_link_state(adapter, vf, state); break; case IFLA_VF_LINK_STATE_AUTO: 
dev_info(&adapter->pdev->dev, "NDO set VF %d link state auto\n", vf); ixgbe_set_vf_link_state(adapter, vf, state); break; default: dev_err(&adapter->pdev->dev, "NDO set VF %d - invalid link state %d\n", vf, state); ret = -EINVAL; } return ret; } int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, bool setting) { struct ixgbe_adapter *adapter = netdev_priv(netdev); /* This operation is currently supported only for 82599 and x540 * devices. */ if (adapter->hw.mac.type < ixgbe_mac_82599EB || adapter->hw.mac.type >= ixgbe_mac_X550) return -EOPNOTSUPP; if (vf >= adapter->num_vfs) return -EINVAL; adapter->vfinfo[vf].rss_query_enabled = setting; return 0; } int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (vf >= adapter->num_vfs) return -EINVAL; /* nothing to do */ if (adapter->vfinfo[vf].trusted == setting) return 0; adapter->vfinfo[vf].trusted = setting; /* reset VF to reconfigure features */ adapter->vfinfo[vf].clear_to_send = false; ixgbe_ping_vf(adapter, vf); e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not "); return 0; } int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (vf >= adapter->num_vfs) return -EINVAL; ivi->vf = vf; memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate; ivi->min_tx_rate = 0; ivi->vlan = adapter->vfinfo[vf].pf_vlan; ivi->qos = adapter->vfinfo[vf].pf_qos; ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; ivi->trusted = adapter->vfinfo[vf].trusted; return 0; }
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
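The per-VF Tx rate limiting in ixgbe_set_vf_rate_limit() above encodes link_speed/tx_rate as a fixed-point "rate factor" and writes it, per queue, to RTTBCNRC. The stand-alone sketch below models only that arithmetic so the resulting register image can be inspected in user space; the shift, mask, and enable-bit values are illustrative assumptions (they are not defined in this file) rather than a statement of the exact hardware layout.

/* rate_factor_demo.c - minimal user-space model of the RTTBCNRC arithmetic
 * used by ixgbe_set_vf_rate_limit(). The register-layout constants below are
 * assumed for illustration; consult ixgbe_type.h for the real definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define RF_INT_SHIFT 14u                    /* assumed integer-part shift */
#define RF_DEC_MASK  0x00003FFFu            /* assumed fractional-part mask */
#define RF_INT_MASK  (RF_DEC_MASK << RF_INT_SHIFT)
#define RS_ENA       0x80000000u            /* assumed "rate scheduler enable" bit */

/* Build the register value for a VF capped at tx_rate Mb/s on a link of
 * link_speed Mb/s; tx_rate == 0 means "no limit" and clears the register. */
static uint32_t rate_factor(uint32_t link_speed, uint32_t tx_rate)
{
	uint32_t bcnrc_val;

	if (!tx_rate)
		return 0;

	bcnrc_val = link_speed << RF_INT_SHIFT;  /* link speed as fixed point */
	bcnrc_val /= tx_rate;                    /* divide by the cap */
	bcnrc_val &= RF_INT_MASK | RF_DEC_MASK;  /* keep only the rate factor */
	bcnrc_val |= RS_ENA;                     /* turn the rate scheduler on */
	return bcnrc_val;
}

int main(void)
{
	/* 10 Gb/s link, VF capped to 1 Gb/s: factor of 10.0 in fixed point */
	printf("RTTBCNRC = 0x%08x\n", rate_factor(10000, 1000));
	printf("RTTBCNRC = 0x%08x\n", rate_factor(10000, 0));
	return 0;
}

As in the driver, the same value would then be written once per Tx queue belonging to the VF's pool.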
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "ixgbe.h" #include <linux/if_ether.h> #include <linux/gfp.h> #include <linux/if_vlan.h> #include <generated/utsrelease.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/fc/fc_fs.h> #include <scsi/fc/fc_fcoe.h> #include <scsi/libfc.h> #include <scsi/libfcoe.h> /** * ixgbe_fcoe_clear_ddp - clear the given ddp context * @ddp: ptr to the ixgbe_fcoe_ddp * * Returns : none * */ static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) { ddp->len = 0; ddp->err = 1; ddp->udl = NULL; ddp->udp = 0UL; ddp->sgl = NULL; ddp->sgc = 0; } /** * ixgbe_fcoe_ddp_put - free the ddp context for a given xid * @netdev: the corresponding net_device * @xid: the xid that corresponding ddp will be freed * * This is the implementation of net_device_ops.ndo_fcoe_ddp_done * and it is expected to be called by ULD, i.e., FCP layer of libfc * to release the corresponding ddp context when the I/O is done. * * Returns : data length already ddp-ed in bytes */ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) { int len; struct ixgbe_fcoe *fcoe; struct ixgbe_adapter *adapter; struct ixgbe_fcoe_ddp *ddp; struct ixgbe_hw *hw; u32 fcbuff; if (!netdev) return 0; if (xid >= netdev->fcoe_ddp_xid) return 0; adapter = netdev_priv(netdev); fcoe = &adapter->fcoe; ddp = &fcoe->ddp[xid]; if (!ddp->udl) return 0; hw = &adapter->hw; len = ddp->len; /* if no error then skip ddp context invalidation */ if (!ddp->err) goto skip_ddpinv; if (hw->mac.type == ixgbe_mac_X550) { /* X550 does not require DDP FCoE lock */ IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0); IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), (xid | IXGBE_FCFLTRW_WE)); /* program FCBUFF */ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0); /* program FCDMARW */ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), (xid | IXGBE_FCDMARW_WE)); /* read FCBUFF to check context invalidated */ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), (xid | IXGBE_FCDMARW_RE)); fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid)); } else { /* other hardware requires DDP FCoE lock */ spin_lock_bh(&fcoe->lock); IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0); IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, (xid | IXGBE_FCFLTRW_WE)); IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0); IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, (xid | IXGBE_FCDMARW_WE)); /* guaranteed to be invalidated after 100us */ IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, (xid | IXGBE_FCDMARW_RE)); fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF); spin_unlock_bh(&fcoe->lock); } if (fcbuff & IXGBE_FCBUFF_VALID) usleep_range(100, 150); skip_ddpinv: if (ddp->sgl) dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, DMA_FROM_DEVICE); if (ddp->pool) { dma_pool_free(ddp->pool, ddp->udl, ddp->udp); ddp->pool = NULL; } ixgbe_fcoe_clear_ddp(ddp); return len; } /** * ixgbe_fcoe_ddp_setup - called to set up ddp context * @netdev: the corresponding net_device * @xid: the exchange id requesting ddp * @sgl: the scatter-gather list for this request * @sgc: the number of scatter-gather items * @target_mode: 1 to setup target mode, 0 to setup initiator mode * * Returns : 1 for success and 0 for no ddp */ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, struct scatterlist *sgl, unsigned int sgc, int target_mode) { struct ixgbe_adapter *adapter; struct ixgbe_hw *hw; struct ixgbe_fcoe *fcoe; struct ixgbe_fcoe_ddp *ddp; struct ixgbe_fcoe_ddp_pool *ddp_pool; struct scatterlist *sg; unsigned int i, j, dmacount; unsigned int len; static const unsigned int bufflen = IXGBE_FCBUFF_MIN; unsigned int 
firstoff = 0; unsigned int lastsize; unsigned int thisoff = 0; unsigned int thislen = 0; u32 fcbuff, fcdmarw, fcfltrw, fcrxctl; dma_addr_t addr = 0; if (!netdev || !sgl) return 0; adapter = netdev_priv(netdev); if (xid >= netdev->fcoe_ddp_xid) { e_warn(drv, "xid=0x%x out-of-range\n", xid); return 0; } /* no DDP if we are already down or resetting */ if (test_bit(__IXGBE_DOWN, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) return 0; fcoe = &adapter->fcoe; ddp = &fcoe->ddp[xid]; if (ddp->sgl) { e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", xid, ddp->sgl, ddp->sgc); return 0; } ixgbe_fcoe_clear_ddp(ddp); if (!fcoe->ddp_pool) { e_warn(drv, "No ddp_pool resources allocated\n"); return 0; } ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); if (!ddp_pool->pool) { e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); goto out_noddp; } /* setup dma from scsi command sgl */ dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); if (dmacount == 0) { e_err(drv, "xid 0x%x DMA map error\n", xid); goto out_noddp; } /* alloc the udl from per cpu ddp pool */ ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); if (!ddp->udl) { e_err(drv, "failed allocated ddp context\n"); goto out_noddp_unmap; } ddp->pool = ddp_pool->pool; ddp->sgl = sgl; ddp->sgc = sgc; j = 0; for_each_sg(sgl, sg, dmacount, i) { addr = sg_dma_address(sg); len = sg_dma_len(sg); while (len) { /* max number of buffers allowed in one DDP context */ if (j >= IXGBE_BUFFCNT_MAX) { ddp_pool->noddp++; goto out_noddp_free; } /* get the offset of length of current buffer */ thisoff = addr & ((dma_addr_t)bufflen - 1); thislen = min((bufflen - thisoff), len); /* * all but the 1st buffer (j == 0) * must be aligned on bufflen */ if ((j != 0) && (thisoff)) goto out_noddp_free; /* * all but the last buffer * ((i == (dmacount - 1)) && (thislen == len)) * must end at bufflen */ if (((i != (dmacount - 1)) || (thislen != len)) && ((thislen + thisoff) != bufflen)) goto out_noddp_free; ddp->udl[j] = (u64)(addr - thisoff); /* only the first buffer may have none-zero offset */ if (j == 0) firstoff = thisoff; len -= thislen; addr += thislen; j++; } } /* only the last buffer may have non-full bufflen */ lastsize = thisoff + thislen; /* * lastsize can not be buffer len. * If it is then adding another buffer with lastsize = 1. */ if (lastsize == bufflen) { if (j >= IXGBE_BUFFCNT_MAX) { ddp_pool->noddp_ext_buff++; goto out_noddp_free; } ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); j++; lastsize = 1; } put_cpu(); fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); /* Set WRCONTX bit to allow DDP for target */ if (target_mode) fcbuff |= (IXGBE_FCBUFF_WRCONTX); fcbuff |= (IXGBE_FCBUFF_VALID); fcdmarw = xid; fcdmarw |= IXGBE_FCDMARW_WE; fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT); fcfltrw = xid; fcfltrw |= IXGBE_FCFLTRW_WE; /* program DMA context */ hw = &adapter->hw; /* turn on last frame indication for target mode as FCP_RSPtarget is * supposed to send FCP_RSP when it is done. 
*/ if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) { set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode); fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL); fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH; IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl); } if (hw->mac.type == ixgbe_mac_X550) { /* X550 does not require DDP lock */ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid), ddp->udp & DMA_BIT_MASK(32)); IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32); IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff); IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw); /* program filter context */ IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID); IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0); IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw); } else { /* DDP lock for indirect DDP context access */ spin_lock_bh(&fcoe->lock); IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); /* program filter context */ IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); spin_unlock_bh(&fcoe->lock); } return 1; out_noddp_free: dma_pool_free(ddp->pool, ddp->udl, ddp->udp); ixgbe_fcoe_clear_ddp(ddp); out_noddp_unmap: dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); out_noddp: put_cpu(); return 0; } /** * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode * @netdev: the corresponding net_device * @xid: the exchange id requesting ddp * @sgl: the scatter-gather list for this request * @sgc: the number of scatter-gather items * * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup * and is expected to be called from ULD, e.g., FCP layer of libfc * to set up ddp for the corresponding xid of the given sglist for * the corresponding I/O. * * Returns : 1 for success and 0 for no ddp */ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, struct scatterlist *sgl, unsigned int sgc) { return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); } /** * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode * @netdev: the corresponding net_device * @xid: the exchange id requesting ddp * @sgl: the scatter-gather list for this request * @sgc: the number of scatter-gather items * * This is the implementation of net_device_ops.ndo_fcoe_ddp_target * and is expected to be called from ULD, e.g., FCP layer of libfc * to set up ddp for the corresponding xid of the given sglist for * the corresponding I/O. The DDP in target mode is a write I/O request * from the initiator. * * Returns : 1 for success and 0 for no ddp */ int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, struct scatterlist *sgl, unsigned int sgc) { return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); } /** * ixgbe_fcoe_ddp - check ddp status and mark it done * @adapter: ixgbe adapter * @rx_desc: advanced rx descriptor * @skb: the skb holding the received data * * This checks ddp status. * * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates * not passing the skb to ULD, > 0 indicates is the length of data * being ddped.
*/ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { int rc = -EINVAL; struct ixgbe_fcoe *fcoe; struct ixgbe_fcoe_ddp *ddp; struct fc_frame_header *fh; struct fcoe_crc_eof *crc; __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR); __le32 ddp_err; int ddp_max; u32 fctl; u16 xid; if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC)) skb->ip_summed = CHECKSUM_NONE; else skb->ip_summed = CHECKSUM_UNNECESSARY; if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)) fh = (struct fc_frame_header *)(skb->data + sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr)); else fh = (struct fc_frame_header *)(skb->data + sizeof(struct fcoe_hdr)); fctl = ntoh24(fh->fh_f_ctl); if (fctl & FC_FC_EX_CTX) xid = be16_to_cpu(fh->fh_ox_id); else xid = be16_to_cpu(fh->fh_rx_id); ddp_max = IXGBE_FCOE_DDP_MAX; /* X550 has different DDP Max limit */ if (adapter->hw.mac.type == ixgbe_mac_X550) ddp_max = IXGBE_FCOE_DDP_MAX_X550; if (xid >= ddp_max) return -EINVAL; fcoe = &adapter->fcoe; ddp = &fcoe->ddp[xid]; if (!ddp->udl) return -EINVAL; ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE | IXGBE_RXDADV_ERR_FCERR); if (ddp_err) return -EINVAL; switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) { /* return 0 to bypass going to ULD for DDPed data */ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP): /* update length of DDPed data */ ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); rc = 0; break; /* unmap the sg list when FCPRSP is received */ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, DMA_FROM_DEVICE); ddp->err = (__force u32)ddp_err; ddp->sgl = NULL; ddp->sgc = 0; fallthrough; /* if DDP length is present pass it through to ULD */ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP): /* update length of DDPed data */ ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); if (ddp->len) rc = ddp->len; break; /* no match will return as an error */ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH): default: break; } /* In target mode, check the last data frame of the sequence. * For DDP in target mode, data is already DDPed but the header * indication of the last data frame could allow us to tell if we * got all the data and the ULP can send FCP_RSP back, as this is * not a full fcoe frame, we fill the trailer here so it won't be * dropped by the ULP stack.
*/ if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && (fctl & FC_FC_END_SEQ)) { skb_linearize(skb); crc = skb_put(skb, sizeof(*crc)); crc->fcoe_eof = FC_EOF_T; } return rc; } /** * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO) * @tx_ring: tx desc ring * @first: first tx_buffer structure containing skb, tx_flags, and protocol * @hdr_len: hdr_len to be returned * * This sets up large send offload for FCoE * * Returns : 0 indicates success, < 0 for error */ int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, u8 *hdr_len) { struct sk_buff *skb = first->skb; struct fc_frame_header *fh; u32 vlan_macip_lens; u32 fcoe_sof_eof = 0; u32 mss_l4len_idx; u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE; u8 sof, eof; if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", skb_shinfo(skb)->gso_type); return -EINVAL; } /* resets the header to point fcoe/fc */ skb_set_network_header(skb, skb->mac_len); skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr)); /* sets up SOF and ORIS */ sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; switch (sof) { case FC_SOF_I2: fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS; break; case FC_SOF_I3: fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF | IXGBE_ADVTXD_FCOEF_ORIS; break; case FC_SOF_N2: break; case FC_SOF_N3: fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF; break; default: dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof); return -EINVAL; } /* the first byte of the last dword is EOF */ skb_copy_bits(skb, skb->len - 4, &eof, 1); /* sets up EOF and ORIE */ switch (eof) { case FC_EOF_N: fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N; break; case FC_EOF_T: /* lso needs ORIE */ if (skb_is_gso(skb)) fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N | IXGBE_ADVTXD_FCOEF_ORIE; else fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T; break; case FC_EOF_NI: fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI; break; case FC_EOF_A: fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; break; default: dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof); return -EINVAL; } /* sets up PARINC indicating data offset */ fh = (struct fc_frame_header *)skb_transport_header(skb); if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC; /* include trailer in headlen as it is replicated per frame */ *hdr_len = sizeof(struct fcoe_crc_eof); /* hdr_len includes fc_hdr if FCoE LSO is enabled */ if (skb_is_gso(skb)) { *hdr_len += skb_transport_offset(skb) + sizeof(struct fc_frame_header); /* update gso_segs and bytecount */ first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, skb_shinfo(skb)->gso_size); first->bytecount += (first->gso_segs - 1) * *hdr_len; first->tx_flags |= IXGBE_TX_FLAGS_TSO; /* Hardware expects L4T to be RSV for FCoE TSO */ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV; } /* set flag indicating FCOE to ixgbe_tx_map call */ first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC; /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */ mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = skb_transport_offset(skb) + sizeof(struct fc_frame_header); vlan_macip_lens |= (skb_transport_offset(skb) - 4) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; /* write context desc */ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, type_tucmd, mss_l4len_idx); return 0; } static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu) { struct ixgbe_fcoe_ddp_pool *ddp_pool; 
ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); dma_pool_destroy(ddp_pool->pool); ddp_pool->pool = NULL; } static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, struct device *dev, unsigned int cpu) { struct ixgbe_fcoe_ddp_pool *ddp_pool; struct dma_pool *pool; char pool_name[32]; snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu); pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX, IXGBE_FCPTR_ALIGN, PAGE_SIZE); if (!pool) return -ENOMEM; ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); ddp_pool->pool = pool; ddp_pool->noddp = 0; ddp_pool->noddp_ext_buff = 0; return 0; } /** * ixgbe_configure_fcoe - configures registers for fcoe at start * @adapter: ptr to ixgbe adapter * * This sets up FCoE related registers * * Returns : none */ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) { struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; struct ixgbe_hw *hw = &adapter->hw; int i, fcoe_q, fcoe_i, fcoe_q_h = 0; int fcreta_size; u32 etqf; /* Minimal functionality for FCoE requires at least CRC offloads */ if (!(adapter->netdev->features & NETIF_F_FCOE_CRC)) return; /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */ etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN; if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { etqf |= IXGBE_ETQF_POOL_ENABLE; etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; } IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf); IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); /* leave registers un-configured if FCoE is disabled */ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) return; /* Use one or more Rx queues for FCoE by redirection table */ fcreta_size = IXGBE_FCRETA_SIZE; if (adapter->hw.mac.type == ixgbe_mac_X550) fcreta_size = IXGBE_FCRETA_SIZE_X550; for (i = 0; i < fcreta_size; i++) { if (adapter->hw.mac.type == ixgbe_mac_X550) { int fcoe_i_h = fcoe->offset + ((i + fcreta_size) % fcoe->indices); fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx; fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) & IXGBE_FCRETA_ENTRY_HIGH_MASK; } fcoe_i = fcoe->offset + (i % fcoe->indices); fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; fcoe_q |= fcoe_q_h; IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); } IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); /* Enable L2 EtherType filter for FIP */ etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN; if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { etqf |= IXGBE_ETQF_POOL_ENABLE; etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; } IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf); /* Send FIP frames to the first FCoE queue */ fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx; IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), IXGBE_ETQS_QUEUE_EN | (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); /* Configure FCoE Rx control */ IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO | (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); } /** * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources * @adapter : ixgbe adapter * * Cleans up outstanding ddp context resources * * Returns : none */ void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) { struct ixgbe_fcoe *fcoe = &adapter->fcoe; int cpu, i, ddp_max; /* do nothing if no DDP pools were allocated */ if (!fcoe->ddp_pool) return; ddp_max = IXGBE_FCOE_DDP_MAX; /* X550 has different DDP Max limit */ if (adapter->hw.mac.type == ixgbe_mac_X550) ddp_max = IXGBE_FCOE_DDP_MAX_X550; for (i = 0; i < ddp_max; i++) ixgbe_fcoe_ddp_put(adapter->netdev, i); 
for_each_possible_cpu(cpu) ixgbe_fcoe_dma_pool_free(fcoe, cpu); dma_unmap_single(&adapter->pdev->dev, fcoe->extra_ddp_buffer_dma, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); kfree(fcoe->extra_ddp_buffer); fcoe->extra_ddp_buffer = NULL; fcoe->extra_ddp_buffer_dma = 0; } /** * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources * @adapter: ixgbe adapter * * Sets up ddp context resouces * * Returns : 0 indicates success or -EINVAL on failure */ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) { struct ixgbe_fcoe *fcoe = &adapter->fcoe; struct device *dev = &adapter->pdev->dev; void *buffer; dma_addr_t dma; unsigned int cpu; /* do nothing if no DDP pools were allocated */ if (!fcoe->ddp_pool) return 0; /* Extra buffer to be shared by all DDPs for HW work around */ buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL); if (!buffer) return -ENOMEM; dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); if (dma_mapping_error(dev, dma)) { e_err(drv, "failed to map extra DDP buffer\n"); kfree(buffer); return -ENOMEM; } fcoe->extra_ddp_buffer = buffer; fcoe->extra_ddp_buffer_dma = dma; /* allocate pci pool for each cpu */ for_each_possible_cpu(cpu) { int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu); if (!err) continue; e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); ixgbe_free_fcoe_ddp_resources(adapter); return -ENOMEM; } return 0; } static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) { struct ixgbe_fcoe *fcoe = &adapter->fcoe; if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) return -EINVAL; fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool); if (!fcoe->ddp_pool) { e_err(drv, "failed to allocate percpu DDP resources\n"); return -ENOMEM; } adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; /* X550 has different DDP Max limit */ if (adapter->hw.mac.type == ixgbe_mac_X550) adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1; return 0; } static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) { struct ixgbe_fcoe *fcoe = &adapter->fcoe; adapter->netdev->fcoe_ddp_xid = 0; if (!fcoe->ddp_pool) return; free_percpu(fcoe->ddp_pool); fcoe->ddp_pool = NULL; } /** * ixgbe_fcoe_enable - turn on FCoE offload feature * @netdev: the corresponding netdev * * Turns on FCoE offload feature in 82599. * * Returns : 0 indicates success or -EINVAL on failure */ int ixgbe_fcoe_enable(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_fcoe *fcoe = &adapter->fcoe; atomic_inc(&fcoe->refcnt); if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) return -EINVAL; if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) return -EINVAL; e_info(drv, "Enabling FCoE offload features.\n"); if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n"); if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); /* Allocate per CPU memory to track DDP pools */ ixgbe_fcoe_ddp_enable(adapter); /* enable FCoE and notify stack */ adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; netdev->features |= NETIF_F_FCOE_MTU; netdev_features_change(netdev); /* release existing queues and reallocate them */ ixgbe_clear_interrupt_scheme(adapter); ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); return 0; } /** * ixgbe_fcoe_disable - turn off FCoE offload feature * @netdev: the corresponding netdev * * Turns off FCoE offload feature in 82599. 
* * Returns : 0 indicates success or -EINVAL on failure */ int ixgbe_fcoe_disable(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (!atomic_dec_and_test(&adapter->fcoe.refcnt)) return -EINVAL; if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) return -EINVAL; e_info(drv, "Disabling FCoE offload features.\n"); if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); /* Free per CPU memory to track DDP pools */ ixgbe_fcoe_ddp_disable(adapter); /* disable FCoE and notify stack */ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; netdev->features &= ~NETIF_F_FCOE_MTU; netdev_features_change(netdev); /* release existing queues and reallocate them */ ixgbe_clear_interrupt_scheme(adapter); ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); return 0; } /** * ixgbe_fcoe_get_wwn - get world wide name for the node or the port * @netdev : ixgbe adapter * @wwn : the world wide name * @type: the type of world wide name * * Returns the node or port world wide name if both the prefix and the san * mac address are valid, then the wwn is formed based on the NAA-2 for * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3). * * Returns : 0 on success */ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) { u16 prefix = 0xffff; struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_mac_info *mac = &adapter->hw.mac; switch (type) { case NETDEV_FCOE_WWNN: prefix = mac->wwnn_prefix; break; case NETDEV_FCOE_WWPN: prefix = mac->wwpn_prefix; break; default: break; } if ((prefix != 0xffff) && is_valid_ether_addr(mac->san_addr)) { *wwn = ((u64) prefix << 48) | ((u64) mac->san_addr[0] << 40) | ((u64) mac->san_addr[1] << 32) | ((u64) mac->san_addr[2] << 24) | ((u64) mac->san_addr[3] << 16) | ((u64) mac->san_addr[4] << 8) | ((u64) mac->san_addr[5]); return 0; } return -EINVAL; } /** * ixgbe_fcoe_get_hbainfo - get FCoE HBA information * @netdev : ixgbe adapter * @info : HBA information * * Returns ixgbe HBA information * * Returns : 0 on success */ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, struct netdev_fcoe_hbainfo *info) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u64 dsn; if (!info) return -EINVAL; /* Don't return information on unsupported devices */ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) return -EINVAL; /* Manufacturer */ snprintf(info->manufacturer, sizeof(info->manufacturer), "Intel Corporation"); /* Serial Number */ /* Get the PCI-e Device Serial Number Capability */ dsn = pci_get_dsn(adapter->pdev); if (dsn) snprintf(info->serial_number, sizeof(info->serial_number), "%016llX", dsn); else snprintf(info->serial_number, sizeof(info->serial_number), "Unknown"); /* Hardware Version */ snprintf(info->hardware_version, sizeof(info->hardware_version), "Rev %d", hw->revision_id); /* Driver Name/Version */ snprintf(info->driver_version, sizeof(info->driver_version), "%s v%s", ixgbe_driver_name, UTS_RELEASE); /* Firmware Version */ strscpy(info->firmware_version, adapter->eeprom_id, sizeof(info->firmware_version)); /* Model */ if (hw->mac.type == ixgbe_mac_82599EB) { snprintf(info->model, sizeof(info->model), "Intel 82599"); } else if (hw->mac.type == ixgbe_mac_X550) { snprintf(info->model, sizeof(info->model), "Intel X550"); } else { snprintf(info->model, sizeof(info->model), "Intel X540"); } /* Model Description */ snprintf(info->model_description, sizeof(info->model_description), "%s", ixgbe_default_device_descr); return 0; 
} /** * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to * @adapter: pointer to the device adapter structure * * Return : TC that FCoE is mapped to */ u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter) { #ifdef CONFIG_IXGBE_DCB return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); #else return 0; #endif }
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
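ixgbe_fcoe_ddp_setup() above accepts a scatter-gather list only when it can be described by a bounded number of 4 KB user descriptors: every chunk except the first must start on a 4 KB boundary, every chunk except the last must end on one, and one extra 1-byte slot is consumed when the last chunk exactly fills its buffer. The user-space sketch below replays that walk over plain (addr, len) pairs so the rule can be exercised in isolation; the BUFFLEN and BUFFCNT_MAX values are assumptions for illustration, not the driver's definitions.

/* ddp_layout_check.c - user-space replay of the buffer-layout rule enforced
 * by ixgbe_fcoe_ddp_setup(). Constants are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define BUFFLEN      4096u  /* assumed DDP buffer size */
#define BUFFCNT_MAX  256u   /* assumed per-context descriptor limit */

struct seg { uint64_t addr; uint32_t len; };

/* Return the number of 4 KB descriptors the layout needs, or -1 if it cannot
 * be expressed under the alignment rules. */
static int ddp_descriptor_count(const struct seg *sgl, unsigned int nseg)
{
	uint32_t thisoff = 0, thislen = 0;
	unsigned int i, j = 0;

	for (i = 0; i < nseg; i++) {
		uint64_t addr = sgl[i].addr;
		uint32_t len = sgl[i].len;

		while (len) {
			if (j >= BUFFCNT_MAX)
				return -1;
			thisoff = addr & (BUFFLEN - 1);
			thislen = BUFFLEN - thisoff;
			if (thislen > len)
				thislen = len;
			/* all but the first descriptor must start aligned */
			if (j != 0 && thisoff)
				return -1;
			/* all but the last descriptor must end at BUFFLEN */
			if ((i != nseg - 1 || thislen != len) &&
			    thisoff + thislen != BUFFLEN)
				return -1;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
	/* a full final buffer costs one extra 1-byte descriptor */
	if (thisoff + thislen == BUFFLEN) {
		if (j >= BUFFCNT_MAX)
			return -1;
		j++;
	}
	return (int)j;
}

int main(void)
{
	struct seg ok[]  = { { 0x10000800, 2048 }, { 0x20000000, 8192 } };
	struct seg bad[] = { { 0x10000800, 2048 }, { 0x20000100, 4096 } };

	printf("ok:  %d descriptors\n", ddp_descriptor_count(ok, 2));
	printf("bad: %d descriptors\n", ddp_descriptor_count(bad, 2));
	return 0;
}

The "bad" case fails because its second segment does not begin on a 4 KB boundary, which is exactly the condition that makes the driver fall back to non-DDP receive.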
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> #include <linux/sched.h> #include "ixgbe.h" #include "ixgbe_phy.h" #include "ixgbe_x540.h" #define IXGBE_X540_MAX_TX_QUEUES 128 #define IXGBE_X540_MAX_RX_QUEUES 128 #define IXGBE_X540_RAR_ENTRIES 128 #define IXGBE_X540_MC_TBL_SIZE 128 #define IXGBE_X540_VFT_TBL_SIZE 128 #define IXGBE_X540_RX_PB_SIZE 384 static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) { return ixgbe_media_type_copper; } s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; /* set_phy_power was set by default to NULL */ phy->ops.set_phy_power = ixgbe_set_copper_phy_power; mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE; mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); return 0; } /** * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilitires * @hw: pointer to hardware structure * @speed: new link speed * @autoneg_wait_to_complete: true when waiting for completion is needed **/ s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); } /** * ixgbe_reset_hw_X540 - Perform hardware reset * @hw: pointer to hardware structure * * Resets the hardware by resetting the transmit and receive units, masks * and clears all interrupts, perform a PHY reset, and perform a link (MAC) * reset. **/ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) { s32 status; u32 ctrl, i; u32 swfw_mask = hw->phy.phy_semaphore_mask; /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); if (status) return status; /* flush pending Tx transactions */ ixgbe_clear_tx_pending(hw); mac_reset_top: status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); if (status) { hw_dbg(hw, "semaphore failed with %d", status); return IXGBE_ERR_SWFW_SYNC; } ctrl = IXGBE_CTRL_RST; ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); hw->mac.ops.release_swfw_sync(hw, swfw_mask); usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { status = IXGBE_ERR_RESET_FAILED; hw_dbg(hw, "Reset polling failed to complete.\n"); } msleep(100); /* * Double resets are required for recovery from certain error * conditions. Between resets, it is necessary to stall to allow time * for any pending HW events to complete. */ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; goto mac_reset_top; } /* Set the Rx packet buffer size. 
*/ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT); /* Store the permanent mac address */ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); /* * Store MAC address from RAR0, clear receive address registers, and * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address. */ hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES; hw->mac.ops.init_rx_addrs(hw); /* Store the permanent SAN mac address */ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); /* Add the SAN MAC address to the RAR only if it's a valid address */ if (is_valid_ether_addr(hw->mac.san_addr)) { /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, hw->mac.san_addr, 0, IXGBE_RAH_AV); /* clear VMDq pool/queue selection for this RAR */ hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, IXGBE_CLEAR_VMDQ_ALL); /* Reserve the last RAR for the SAN MAC address */ hw->mac.num_rar_entries--; } /* Store the alternative WWNN/WWPN prefix */ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, &hw->mac.wwpn_prefix); return status; } /** * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure * * Starts the hardware using the generic start_hw function * and the generation start_hw function. * Then performs revision-specific operations, if any. **/ s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) { s32 ret_val; ret_val = ixgbe_start_hw_generic(hw); if (ret_val) return ret_val; return ixgbe_start_hw_gen2(hw); } /** * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params * @hw: pointer to hardware structure * * Initializes the EEPROM parameters ixgbe_eeprom_info within the * ixgbe_hw struct in order to set up EEPROM access. **/ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; u32 eec; u16 eeprom_size; if (eeprom->type == ixgbe_eeprom_uninitialized) { eeprom->semaphore_delay = 10; eeprom->type = ixgbe_flash; eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); eeprom->word_size = BIT(eeprom_size + IXGBE_EEPROM_WORD_SIZE_SHIFT); hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type, eeprom->word_size); } return 0; } /** * ixgbe_read_eerd_X540- Read EEPROM word using EERD * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM using the EERD register. **/ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) { s32 status; if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return IXGBE_ERR_SWFW_SYNC; status = ixgbe_read_eerd_generic(hw, offset, data); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } /** * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @words: number of words * @data: word(s) read from the EEPROM * * Reads a 16 bit word(s) from the EEPROM using the EERD register. 
**/ static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status; if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return IXGBE_ERR_SWFW_SYNC; status = ixgbe_read_eerd_buffer_generic(hw, offset, words, data); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } /** * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to write * @data: word write to the EEPROM * * Write a 16 bit word to the EEPROM using the EEWR register. **/ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) { s32 status; if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return IXGBE_ERR_SWFW_SYNC; status = ixgbe_write_eewr_generic(hw, offset, data); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } /** * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to write * @words: number of words * @data: word(s) write to the EEPROM * * Write a 16 bit word(s) to the EEPROM using the EEWR register. **/ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status; if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return IXGBE_ERR_SWFW_SYNC; status = ixgbe_write_eewr_buffer_generic(hw, offset, words, data); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } /** * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum * * This function does not use synchronization for EERD and EEWR. It can * be used internally by function which utilize ixgbe_acquire_swfw_sync_X540. * * @hw: pointer to hardware structure **/ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) { u16 i; u16 j; u16 checksum = 0; u16 length = 0; u16 pointer = 0; u16 word = 0; u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM; u16 ptr_start = IXGBE_PCIE_ANALOG_PTR; /* * Do not use hw->eeprom.ops.read because we do not want to take * the synchronization semaphores here. Instead use * ixgbe_read_eerd_generic */ /* Include 0x0-0x3F in the checksum */ for (i = 0; i < checksum_last_word; i++) { if (ixgbe_read_eerd_generic(hw, i, &word)) { hw_dbg(hw, "EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } checksum += word; } /* * Include all data from pointers 0x3, 0x6-0xE. This excludes the * FW, PHY module, and PCIe Expansion/Option ROM pointers. */ for (i = ptr_start; i < IXGBE_FW_PTR; i++) { if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) continue; if (ixgbe_read_eerd_generic(hw, i, &pointer)) { hw_dbg(hw, "EEPROM read failed\n"); break; } /* Skip pointer section if the pointer is invalid. */ if (pointer == 0xFFFF || pointer == 0 || pointer >= hw->eeprom.word_size) continue; if (ixgbe_read_eerd_generic(hw, pointer, &length)) { hw_dbg(hw, "EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } /* Skip pointer section if length is invalid. */ if (length == 0xFFFF || length == 0 || (pointer + length) >= hw->eeprom.word_size) continue; for (j = pointer + 1; j <= pointer + length; j++) { if (ixgbe_read_eerd_generic(hw, j, &word)) { hw_dbg(hw, "EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } checksum += word; } } checksum = (u16)IXGBE_EEPROM_SUM - checksum; return (s32)checksum; } /** * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum * @hw: pointer to hardware structure * @checksum_val: calculated checksum * * Performs checksum calculation and validates the EEPROM checksum. 
If the * caller does not need checksum_val, the value can be NULL. **/ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val) { s32 status; u16 checksum; u16 read_checksum = 0; /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); if (status) { hw_dbg(hw, "EEPROM read failed\n"); return status; } if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return IXGBE_ERR_SWFW_SYNC; status = hw->eeprom.ops.calc_checksum(hw); if (status < 0) goto out; checksum = (u16)(status & 0xffff); /* Do not use hw->eeprom.ops.read because we do not want to take * the synchronization semaphores twice here. */ status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); if (status) goto out; /* Verify read checksum from EEPROM is the same as * calculated checksum */ if (read_checksum != checksum) { hw_dbg(hw, "Invalid EEPROM checksum"); status = IXGBE_ERR_EEPROM_CHECKSUM; } /* If the user cares, return the calculated checksum */ if (checksum_val) *checksum_val = checksum; out: hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } /** * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash * @hw: pointer to hardware structure * * After writing EEPROM to shadow RAM using EEWR register, software calculates * checksum and updates the EEPROM and instructs the hardware to update * the flash. **/ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) { s32 status; u16 checksum; /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); if (status) { hw_dbg(hw, "EEPROM read failed\n"); return status; } if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return IXGBE_ERR_SWFW_SYNC; status = hw->eeprom.ops.calc_checksum(hw); if (status < 0) goto out; checksum = (u16)(status & 0xffff); /* Do not use hw->eeprom.ops.write because we do not want to * take the synchronization semaphores twice here. */ status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum); if (status) goto out; status = ixgbe_update_flash_X540(hw); out: hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } /** * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device * @hw: pointer to hardware structure * * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy * EEPROM from shadow RAM to the flash device. 
**/ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) { u32 flup; s32 status; status = ixgbe_poll_flash_update_done_X540(hw); if (status == IXGBE_ERR_EEPROM) { hw_dbg(hw, "Flash update time out\n"); return status; } flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)) | IXGBE_EEC_FLUP; IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup); status = ixgbe_poll_flash_update_done_X540(hw); if (status == 0) hw_dbg(hw, "Flash update complete\n"); else hw_dbg(hw, "Flash update time out\n"); if (hw->revision_id == 0) { flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (flup & IXGBE_EEC_SEC1VAL) { flup |= IXGBE_EEC_FLUP; IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup); } status = ixgbe_poll_flash_update_done_X540(hw); if (status == 0) hw_dbg(hw, "Flash update complete\n"); else hw_dbg(hw, "Flash update time out\n"); } return status; } /** * ixgbe_poll_flash_update_done_X540 - Poll flash update status * @hw: pointer to hardware structure * * Polls the FLUDONE (bit 26) of the EEC Register to determine when the * flash update is done. **/ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) { u32 i; u32 reg; for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { reg = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (reg & IXGBE_EEC_FLUDONE) return 0; udelay(5); } return IXGBE_ERR_EEPROM; } /** * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to acquire * * Acquires the SWFW semaphore thought the SW_FW_SYNC register for * the specified function (CSR, PHY0, PHY1, NVM, Flash) **/ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK; u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK; u32 fwmask = swmask << 5; u32 timeout = 200; u32 hwmask = 0; u32 swfw_sync; u32 i; if (swmask & IXGBE_GSSR_EEP_SM) hwmask = IXGBE_GSSR_FLASH_SM; /* SW only mask does not have FW bit pair */ if (mask & IXGBE_GSSR_SW_MNG_SM) swmask |= IXGBE_GSSR_SW_MNG_SM; swmask |= swi2c_mask; fwmask |= swi2c_mask << 2; for (i = 0; i < timeout; i++) { /* SW NVM semaphore bit is used for access to all * SW_FW_SYNC bits (not just NVM) */ if (ixgbe_get_swfw_sync_semaphore(hw)) return IXGBE_ERR_SWFW_SYNC; swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); if (!(swfw_sync & (fwmask | swmask | hwmask))) { swfw_sync |= swmask; IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); usleep_range(5000, 6000); return 0; } /* Firmware currently using resource (fwmask), hardware * currently using resource (hwmask), or other software * thread currently using resource (swmask) */ ixgbe_release_swfw_sync_semaphore(hw); usleep_range(5000, 10000); } /* If the resource is not released by the FW/HW the SW can assume that * the FW/HW malfunctions. In that case the SW should set the SW bit(s) * of the requested resource(s) while ignoring the corresponding FW/HW * bits in the SW_FW_SYNC register. */ if (ixgbe_get_swfw_sync_semaphore(hw)) return IXGBE_ERR_SWFW_SYNC; swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); if (swfw_sync & (fwmask | hwmask)) { swfw_sync |= swmask; IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); usleep_range(5000, 6000); return 0; } /* If the resource is not released by other SW the SW can assume that * the other SW malfunctions. In that case the SW should clear all SW * flags that it does not own and then repeat the whole process once * again. 
*/ if (swfw_sync & swmask) { u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM; if (swi2c_mask) rmask |= IXGBE_GSSR_I2C_MASK; ixgbe_release_swfw_sync_X540(hw, rmask); ixgbe_release_swfw_sync_semaphore(hw); return IXGBE_ERR_SWFW_SYNC; } ixgbe_release_swfw_sync_semaphore(hw); return IXGBE_ERR_SWFW_SYNC; } /** * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to release * * Releases the SWFW semaphore through the SW_FW_SYNC register * for the specified function (CSR, PHY0, PHY1, EVM, Flash) **/ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM); u32 swfw_sync; if (mask & IXGBE_GSSR_I2C_MASK) swmask |= mask & IXGBE_GSSR_I2C_MASK; ixgbe_get_swfw_sync_semaphore(hw); swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); swfw_sync &= ~swmask; IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); usleep_range(5000, 6000); } /** * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore * @hw: pointer to hardware structure * * Sets the hardware semaphores so SW/FW can gain control of shared resources */ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) { u32 timeout = 2000; u32 i; u32 swsm; /* Get SMBI software semaphore between device drivers first */ for (i = 0; i < timeout; i++) { /* If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); if (!(swsm & IXGBE_SWSM_SMBI)) break; usleep_range(50, 100); } if (i == timeout) { hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n"); return IXGBE_ERR_EEPROM; } /* Now get the semaphore between SW/FW through the REGSMP bit */ for (i = 0; i < timeout; i++) { swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); if (!(swsm & IXGBE_SWFW_REGSMP)) return 0; usleep_range(50, 100); } /* Release semaphores and return error if SW NVM semaphore * was not granted because we do not have access to the EEPROM */ hw_dbg(hw, "REGSMP Software NVM semaphore not granted\n"); ixgbe_release_swfw_sync_semaphore(hw); return IXGBE_ERR_EEPROM; } /** * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore * @hw: pointer to hardware structure * * This function clears hardware semaphore bits. **/ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) { u32 swsm; /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); swsm &= ~IXGBE_SWFW_REGSMP; IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swsm); swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); swsm &= ~IXGBE_SWSM_SMBI; IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_init_swfw_sync_X540 - Release hardware semaphore * @hw: pointer to hardware structure * * This function reset hardware semaphore bits for a semaphore that may * have be left locked due to a catastrophic failure. **/ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) { u32 rmask; /* First try to grab the semaphore but we don't need to bother * looking to see whether we got the lock or not since we do * the same thing regardless of whether we got the lock or not. * We got the lock - we release it. * We timeout trying to get the lock - we force its release. */ ixgbe_get_swfw_sync_semaphore(hw); ixgbe_release_swfw_sync_semaphore(hw); /* Acquire and release all software resources. 
*/ rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_I2C_MASK; ixgbe_acquire_swfw_sync_X540(hw, rmask); ixgbe_release_swfw_sync_X540(hw, rmask); } /** * ixgbe_blink_led_start_X540 - Blink LED based on index. * @hw: pointer to hardware structure * @index: led number to blink * * Devices that implement the version 2 interface: * X540 **/ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) { u32 macc_reg; u32 ledctl_reg; ixgbe_link_speed speed; bool link_up; if (index > 3) return IXGBE_ERR_PARAM; /* Link should be up in order for the blink bit in the LED control * register to work. Force link and speed in the MAC if link is down. * This will be reversed when we stop the blinking. */ hw->mac.ops.check_link(hw, &speed, &link_up, false); if (!link_up) { macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); } /* Set the LED to LINK_UP + BLINK. */ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); ledctl_reg |= IXGBE_LED_BLINK(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); IXGBE_WRITE_FLUSH(hw); return 0; } /** * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index. * @hw: pointer to hardware structure * @index: led number to stop blinking * * Devices that implement the version 2 interface: * X540 **/ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) { u32 macc_reg; u32 ledctl_reg; if (index > 3) return IXGBE_ERR_PARAM; /* Restore the LED to its default value. */ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); ledctl_reg &= ~IXGBE_LED_BLINK(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); /* Unforce link and speed in the MAC. 
*/ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS); IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); IXGBE_WRITE_FLUSH(hw); return 0; } static const struct ixgbe_mac_operations mac_ops_X540 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_X540, .start_hw = &ixgbe_start_hw_X540, .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, .get_media_type = &ixgbe_get_media_type_X540, .enable_rx_dma = &ixgbe_enable_rx_dma_generic, .get_mac_addr = &ixgbe_get_mac_addr_generic, .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, .get_device_caps = &ixgbe_get_device_caps_generic, .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, .stop_adapter = &ixgbe_stop_adapter_generic, .get_bus_info = &ixgbe_get_bus_info_generic, .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, .read_analog_reg8 = NULL, .write_analog_reg8 = NULL, .setup_link = &ixgbe_setup_mac_link_X540, .set_rxpba = &ixgbe_set_rxpba_generic, .check_link = &ixgbe_check_mac_link_generic, .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, .led_on = &ixgbe_led_on_generic, .led_off = &ixgbe_led_off_generic, .init_led_link_act = ixgbe_init_led_link_act_generic, .blink_led_start = &ixgbe_blink_led_start_X540, .blink_led_stop = &ixgbe_blink_led_stop_X540, .set_rar = &ixgbe_set_rar_generic, .clear_rar = &ixgbe_clear_rar_generic, .set_vmdq = &ixgbe_set_vmdq_generic, .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, .clear_vmdq = &ixgbe_clear_vmdq_generic, .init_rx_addrs = &ixgbe_init_rx_addrs_generic, .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, .enable_mc = &ixgbe_enable_mc_generic, .disable_mc = &ixgbe_disable_mc_generic, .clear_vfta = &ixgbe_clear_vfta_generic, .set_vfta = &ixgbe_set_vfta_generic, .fc_enable = &ixgbe_fc_enable_generic, .setup_fc = ixgbe_setup_fc_generic, .fc_autoneg = ixgbe_fc_autoneg, .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, .init_uta_tables = &ixgbe_init_uta_tables_generic, .setup_sfp = NULL, .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, .release_swfw_sync = &ixgbe_release_swfw_sync_X540, .init_swfw_sync = &ixgbe_init_swfw_sync_X540, .disable_rx_buff = &ixgbe_disable_rx_buff_generic, .enable_rx_buff = &ixgbe_enable_rx_buff_generic, .get_thermal_sensor_data = NULL, .init_thermal_sensor_thresh = NULL, .prot_autoc_read = &prot_autoc_read_generic, .prot_autoc_write = &prot_autoc_write_generic, .enable_rx = &ixgbe_enable_rx_generic, .disable_rx = &ixgbe_disable_rx_generic, }; static const struct ixgbe_eeprom_operations eeprom_ops_X540 = { .init_params = &ixgbe_init_eeprom_params_X540, .read = &ixgbe_read_eerd_X540, .read_buffer = &ixgbe_read_eerd_buffer_X540, .write = &ixgbe_write_eewr_X540, .write_buffer = &ixgbe_write_eewr_buffer_X540, .calc_checksum = &ixgbe_calc_eeprom_checksum_X540, .validate_checksum = &ixgbe_validate_eeprom_checksum_X540, .update_checksum = &ixgbe_update_eeprom_checksum_X540, }; static const struct ixgbe_phy_operations phy_ops_X540 = { .identify = &ixgbe_identify_phy_generic, .identify_sfp = &ixgbe_identify_sfp_module_generic, .init = NULL, .reset = NULL, .read_reg = &ixgbe_read_phy_reg_generic, .write_reg = &ixgbe_write_phy_reg_generic, .setup_link = &ixgbe_setup_phy_link_generic, .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, .read_i2c_byte = &ixgbe_read_i2c_byte_generic, .write_i2c_byte = &ixgbe_write_i2c_byte_generic, .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, 
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, .check_overtemp = &ixgbe_tn_check_overtemp, .set_phy_power = &ixgbe_set_copper_phy_power, }; static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(X540) }; const struct ixgbe_info ixgbe_X540_info = { .mac = ixgbe_mac_X540, .get_invariants = &ixgbe_get_invariants_X540, .mac_ops = &mac_ops_X540, .eeprom_ops = &eeprom_ops_X540, .phy_ops = &phy_ops_X540, .mbx_ops = &mbx_ops_generic, .mvals = ixgbe_mvals_X540, };
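The ops tables and ixgbe_X540_info above are only ever reached through function pointers, in the same way hw->mac.ops.check_link() is called earlier in this file. Below is a minimal sketch of that dispatch pattern, under stated assumptions: the helper name example_blink_led0() is hypothetical, hw->mac.ops is assumed to already be populated from mac_ops_X540 during probe, and msleep() requires <linux/delay.h>.

/* Hypothetical helper, shown only to illustrate ops-table dispatch;
 * not part of the driver.
 */
static s32 example_blink_led0(struct ixgbe_hw *hw)
{
	s32 err;

	/* Table entries may legitimately be NULL (see setup_sfp above). */
	if (!hw->mac.ops.blink_led_start || !hw->mac.ops.blink_led_stop)
		return IXGBE_ERR_PARAM;

	err = hw->mac.ops.blink_led_start(hw, 0);	/* -> ixgbe_blink_led_start_X540 */
	if (err)
		return err;

	msleep(2000);					/* let LED 0 blink briefly */

	return hw->mac.ops.blink_led_stop(hw, 0);	/* -> ixgbe_blink_led_stop_X540 */
}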
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/types.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/string.h> #include <linux/in.h> #include <linux/interrupt.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/sctp.h> #include <linux/pkt_sched.h> #include <linux/ipv6.h> #include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/if.h> #include <linux/if_vlan.h> #include <linux/if_macvlan.h> #include <linux/if_bridge.h> #include <linux/prefetch.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/atomic.h> #include <linux/numa.h> #include <generated/utsrelease.h> #include <scsi/fc/fc_fcoe.h> #include <net/udp_tunnel.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> #include <net/vxlan.h> #include <net/mpls.h> #include <net/netdev_queues.h> #include <net/xdp_sock_drv.h> #include <net/xfrm.h> #include "ixgbe.h" #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_phy.h" #include "ixgbe_sriov.h" #include "ixgbe_model.h" #include "ixgbe_txrx_common.h" char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver"; #ifdef IXGBE_FCOE char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #else static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif static const char ixgbe_copyright[] = "Copyright (c) 1999-2016 Intel Corporation."; static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. 
If the problem persists, power off the system and replace the adapter"; static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_82598] = &ixgbe_82598_info, [board_82599] = &ixgbe_82599_info, [board_X540] = &ixgbe_X540_info, [board_X550] = &ixgbe_X550_info, [board_X550EM_x] = &ixgbe_X550EM_x_info, [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info, [board_x550em_a] = &ixgbe_x550em_a_info, [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info, }; /* ixgbe_pci_tbl - PCI Device ID Table * * Wildcard entries (PCI_ANY_ID) should come last * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a}, {PCI_VDEVICE(INTEL, 
IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw }, /* required last entry */ {0, } }; MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); #ifdef CONFIG_IXGBE_DCA static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, void *p); static struct notifier_block dca_notifier = { .notifier_call = ixgbe_notify_dca, .next = NULL, .priority = 0 }; #endif #ifdef CONFIG_PCI_IOV static unsigned int max_vfs; module_param(max_vfs, uint, 0); MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)"); #endif /* CONFIG_PCI_IOV */ static bool allow_unsupported_sfp; module_param(allow_unsupported_sfp, bool, 0); MODULE_PARM_DESC(allow_unsupported_sfp, "Allow unsupported and untested SFP+ modules on 82599-based adapters"); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); MODULE_AUTHOR("Intel Corporation, <[email protected]>"); MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); MODULE_LICENSE("GPL v2"); DEFINE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key); EXPORT_SYMBOL(ixgbe_xdp_locking_key); static struct workqueue_struct *ixgbe_wq; static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); static const struct net_device_ops ixgbe_netdev_ops; static bool netif_is_ixgbe(struct net_device *dev) { return dev && (dev->netdev_ops == &ixgbe_netdev_ops); } static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, u32 reg, u16 *value) { struct pci_dev *parent_dev; struct pci_bus *parent_bus; parent_bus = adapter->pdev->bus->parent; if (!parent_bus) return -1; parent_dev = parent_bus->self; if (!parent_dev) return -1; if (!pci_is_pcie(parent_dev)) return -1; pcie_capability_read_word(parent_dev, reg, value); if (*value == IXGBE_FAILED_READ_CFG_WORD && ixgbe_check_cfg_remove(&adapter->hw, parent_dev)) return -1; return 0; } static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u16 link_status = 0; int err; hw->bus.type = ixgbe_bus_type_pci_express; /* Get the negotiated link width and speed from PCI config space of the * parent, as this device is behind a switch */ err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status); /* assume caller will handle error case */ if (err) return err; hw->bus.width = ixgbe_convert_bus_width(link_status); hw->bus.speed = ixgbe_convert_bus_speed(link_status); return 0; } /** * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent * @hw: hw specific details * * This function is used by probe to determine whether a device's PCI-Express * bandwidth details should be gathered from the parent bus instead of from the * device. Used to ensure that various locations all have the correct device ID * checks. */ static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) { switch (hw->device_id) { case IXGBE_DEV_ID_82599_SFP_SF_QP: case IXGBE_DEV_ID_82599_QSFP_SF_QP: return true; default: return false; } } static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, int expected_gts) { struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *pdev; /* Some devices are not connected over PCIe and thus do not negotiate * speed. 
These devices do not have valid bus info, and thus any report * we generate may not be correct. */ if (hw->bus.type == ixgbe_bus_type_internal) return; /* determine whether to use the parent device */ if (ixgbe_pcie_from_parent(&adapter->hw)) pdev = adapter->pdev->bus->parent->self; else pdev = adapter->pdev; pcie_print_link_status(pdev); } static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) { if (!test_bit(__IXGBE_DOWN, &adapter->state) && !test_bit(__IXGBE_REMOVING, &adapter->state) && !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) queue_work(ixgbe_wq, &adapter->service_task); } static void ixgbe_remove_adapter(struct ixgbe_hw *hw) { struct ixgbe_adapter *adapter = hw->back; if (!hw->hw_addr) return; hw->hw_addr = NULL; e_dev_err("Adapter removed\n"); if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) ixgbe_service_event_schedule(adapter); } static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) { u8 __iomem *reg_addr; u32 value; int i; reg_addr = READ_ONCE(hw->hw_addr); if (ixgbe_removed(reg_addr)) return IXGBE_FAILED_READ_REG; /* Register read of 0xFFFFFFF can indicate the adapter has been removed, * so perform several status register reads to determine if the adapter * has been removed. */ for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) { value = readl(reg_addr + IXGBE_STATUS); if (value != IXGBE_FAILED_READ_REG) break; mdelay(3); } if (value == IXGBE_FAILED_READ_REG) ixgbe_remove_adapter(hw); else value = readl(reg_addr + reg); return value; } /** * ixgbe_read_reg - Read from device register * @hw: hw specific details * @reg: offset of register to read * * Returns : value read or IXGBE_FAILED_READ_REG if removed * * This function is used to read device registers. It checks for device * removal by confirming any read that returns all ones by checking the * status register value for all ones. This function avoids reading from * the hardware if a removal was previously detected in which case it * returns IXGBE_FAILED_READ_REG (all ones). 
*/ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) { u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); u32 value; if (ixgbe_removed(reg_addr)) return IXGBE_FAILED_READ_REG; if (unlikely(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) { struct ixgbe_adapter *adapter; int i; for (i = 0; i < 200; ++i) { value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY); if (likely(!value)) goto writes_completed; if (value == IXGBE_FAILED_READ_REG) { ixgbe_remove_adapter(hw); return IXGBE_FAILED_READ_REG; } udelay(5); } adapter = hw->back; e_warn(hw, "register writes incomplete %08x\n", value); } writes_completed: value = readl(reg_addr + reg); if (unlikely(value == IXGBE_FAILED_READ_REG)) value = ixgbe_check_remove(hw, reg); return value; } static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev) { u16 value; pci_read_config_word(pdev, PCI_VENDOR_ID, &value); if (value == IXGBE_FAILED_READ_CFG_WORD) { ixgbe_remove_adapter(hw); return true; } return false; } u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg) { struct ixgbe_adapter *adapter = hw->back; u16 value; if (ixgbe_removed(hw->hw_addr)) return IXGBE_FAILED_READ_CFG_WORD; pci_read_config_word(adapter->pdev, reg, &value); if (value == IXGBE_FAILED_READ_CFG_WORD && ixgbe_check_cfg_remove(hw, adapter->pdev)) return IXGBE_FAILED_READ_CFG_WORD; return value; } #ifdef CONFIG_PCI_IOV static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg) { struct ixgbe_adapter *adapter = hw->back; u32 value; if (ixgbe_removed(hw->hw_addr)) return IXGBE_FAILED_READ_CFG_DWORD; pci_read_config_dword(adapter->pdev, reg, &value); if (value == IXGBE_FAILED_READ_CFG_DWORD && ixgbe_check_cfg_remove(hw, adapter->pdev)) return IXGBE_FAILED_READ_CFG_DWORD; return value; } #endif /* CONFIG_PCI_IOV */ void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value) { struct ixgbe_adapter *adapter = hw->back; if (ixgbe_removed(hw->hw_addr)) return; pci_write_config_word(adapter->pdev, reg, value); } static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) { BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); /* flush memory to make sure state is correct before next watchdog */ smp_mb__before_atomic(); clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); } struct ixgbe_reg_info { u32 ofs; char *name; }; static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { /* General Registers */ {IXGBE_CTRL, "CTRL"}, {IXGBE_STATUS, "STATUS"}, {IXGBE_CTRL_EXT, "CTRL_EXT"}, /* Interrupt Registers */ {IXGBE_EICR, "EICR"}, /* RX Registers */ {IXGBE_SRRCTL(0), "SRRCTL"}, {IXGBE_DCA_RXCTRL(0), "DRXCTL"}, {IXGBE_RDLEN(0), "RDLEN"}, {IXGBE_RDH(0), "RDH"}, {IXGBE_RDT(0), "RDT"}, {IXGBE_RXDCTL(0), "RXDCTL"}, {IXGBE_RDBAL(0), "RDBAL"}, {IXGBE_RDBAH(0), "RDBAH"}, /* TX Registers */ {IXGBE_TDBAL(0), "TDBAL"}, {IXGBE_TDBAH(0), "TDBAH"}, {IXGBE_TDLEN(0), "TDLEN"}, {IXGBE_TDH(0), "TDH"}, {IXGBE_TDT(0), "TDT"}, {IXGBE_TXDCTL(0), "TXDCTL"}, /* List Terminator */ { .name = NULL } }; /* * ixgbe_regdump - register printout routine */ static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) { int i; char rname[16]; u32 regs[64]; switch (reginfo->ofs) { case IXGBE_SRRCTL(0): for (i = 0; i < 64; i++) regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); break; case IXGBE_DCA_RXCTRL(0): for (i = 0; i < 64; i++) regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); break; case IXGBE_RDLEN(0): for (i = 0; i < 64; i++) regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); break; case IXGBE_RDH(0): for (i = 0; i < 64; i++) regs[i] = IXGBE_READ_REG(hw, 
IXGBE_RDH(i)); break; case IXGBE_RDT(0): for (i = 0; i < 64; i++) regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); break; case IXGBE_RXDCTL(0): for (i = 0; i < 64; i++) regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); break; case IXGBE_RDBAL(0): for (i = 0; i < 64; i++) regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); break; case IXGBE_RDBAH(0): for (i = 0; i < 64; i++) regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); break; case IXGBE_TDBAL(0): for (i = 0; i < 64; i++) regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); break; case IXGBE_TDBAH(0): for (i = 0; i < 64; i++) regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); break; case IXGBE_TDLEN(0): for (i = 0; i < 64; i++) regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); break; case IXGBE_TDH(0): for (i = 0; i < 64; i++) regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); break; case IXGBE_TDT(0): for (i = 0; i < 64; i++) regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); break; case IXGBE_TXDCTL(0): for (i = 0; i < 64; i++) regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); break; default: pr_info("%-15s %08x\n", reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs)); return; } i = 0; while (i < 64) { int j; char buf[9 * 8 + 1]; char *p = buf; snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7); for (j = 0; j < 8; j++) p += sprintf(p, " %08x", regs[i++]); pr_err("%-15s%s\n", rname, buf); } } static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n) { struct ixgbe_tx_buffer *tx_buffer; tx_buffer = &ring->tx_buffer_info[ring->next_to_clean]; pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", n, ring->next_to_use, ring->next_to_clean, (u64)dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), tx_buffer->next_to_watch, (u64)tx_buffer->time_stamp); } /* * ixgbe_dump - Print registers, tx-rings and rx-rings */ static void ixgbe_dump(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_reg_info *reginfo; int n = 0; struct ixgbe_ring *ring; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; struct my_u0 { u64 a; u64 b; } *u0; struct ixgbe_ring *rx_ring; union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *rx_buffer_info; int i = 0; if (!netif_msg_hw(adapter)) return; /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); pr_info("Device Name state " "trans_start\n"); pr_info("%-15s %016lX %016lX\n", netdev->name, netdev->state, dev_trans_start(netdev)); } /* Print Registers */ dev_info(&adapter->pdev->dev, "Register Dump\n"); pr_info(" Register Name Value\n"); for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; reginfo->name; reginfo++) { ixgbe_regdump(hw, reginfo); } /* Print TX Ring Summary */ if (!netdev || !netif_running(netdev)) return; dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); pr_info(" %s %s %s %s\n", "Queue [NTU] [NTC] [bi(ntc)->dma ]", "leng", "ntw", "timestamp"); for (n = 0; n < adapter->num_tx_queues; n++) { ring = adapter->tx_ring[n]; ixgbe_print_buffer(ring, n); } for (n = 0; n < adapter->num_xdp_queues; n++) { ring = adapter->xdp_ring[n]; ixgbe_print_buffer(ring, n); } /* Print TX Rings */ if (!netif_msg_tx_done(adapter)) goto rx_ring_summary; dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); /* Transmit Descriptor Formats * * 82598 Advanced Transmit Descriptor * +--------------------------------------------------------------+ * 0 | Buffer Address [63:0] | * +--------------------------------------------------------------+ * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | * 
+--------------------------------------------------------------+ * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 * * 82598 Advanced Transmit Descriptor (Write-Back Format) * +--------------------------------------------------------------+ * 0 | RSV [63:0] | * +--------------------------------------------------------------+ * 8 | RSV | STA | NXTSEQ | * +--------------------------------------------------------------+ * 63 36 35 32 31 0 * * 82599+ Advanced Transmit Descriptor * +--------------------------------------------------------------+ * 0 | Buffer Address [63:0] | * +--------------------------------------------------------------+ * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN | * +--------------------------------------------------------------+ * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0 * * 82599+ Advanced Transmit Descriptor (Write-Back Format) * +--------------------------------------------------------------+ * 0 | RSV [63:0] | * +--------------------------------------------------------------+ * 8 | RSV | STA | RSV | * +--------------------------------------------------------------+ * 63 36 35 32 31 0 */ for (n = 0; n < adapter->num_tx_queues; n++) { ring = adapter->tx_ring[n]; pr_info("------------------------------------\n"); pr_info("TX QUEUE INDEX = %d\n", ring->queue_index); pr_info("------------------------------------\n"); pr_info("%s%s %s %s %s %s\n", "T [desc] [address 63:0 ] ", "[PlPOIdStDDt Ln] [bi->dma ] ", "leng", "ntw", "timestamp", "bi->skb"); for (i = 0; ring->desc && (i < ring->count); i++) { tx_desc = IXGBE_TX_DESC(ring, i); tx_buffer = &ring->tx_buffer_info[i]; u0 = (struct my_u0 *)tx_desc; if (dma_unmap_len(tx_buffer, len) > 0) { const char *ring_desc; if (i == ring->next_to_use && i == ring->next_to_clean) ring_desc = " NTC/U"; else if (i == ring->next_to_use) ring_desc = " NTU"; else if (i == ring->next_to_clean) ring_desc = " NTC"; else ring_desc = ""; pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s", i, le64_to_cpu((__force __le64)u0->a), le64_to_cpu((__force __le64)u0->b), (u64)dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), tx_buffer->next_to_watch, (u64)tx_buffer->time_stamp, tx_buffer->skb, ring_desc); if (netif_msg_pktdata(adapter) && tx_buffer->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, tx_buffer->skb->data, dma_unmap_len(tx_buffer, len), true); } } } /* Print RX Rings Summary */ rx_ring_summary: dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); pr_info("Queue [NTU] [NTC]\n"); for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; pr_info("%5d %5X %5X\n", n, rx_ring->next_to_use, rx_ring->next_to_clean); } /* Print RX Rings */ if (!netif_msg_rx_status(adapter)) return; dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); /* Receive Descriptor Formats * * 82598 Advanced Receive Descriptor (Read) Format * 63 1 0 * +-----------------------------------------------------+ * 0 | Packet Buffer Address [63:1] |A0/NSE| * +----------------------------------------------+------+ * 8 | Header Buffer Address [63:1] | DD | * +-----------------------------------------------------+ * * * 82598 Advanced Receive Descriptor (Write-Back) Format * * 63 48 47 32 31 30 21 20 16 15 4 3 0 * +------------------------------------------------------+ * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS | * | Packet | IP | | | | Type | Type | * | Checksum | Ident | | | | | | * +------------------------------------------------------+ * 8 | VLAN Tag | Length | Extended Error | Extended Status 
| * +------------------------------------------------------+ * 63 48 47 32 31 20 19 0 * * 82599+ Advanced Receive Descriptor (Read) Format * 63 1 0 * +-----------------------------------------------------+ * 0 | Packet Buffer Address [63:1] |A0/NSE| * +----------------------------------------------+------+ * 8 | Header Buffer Address [63:1] | DD | * +-----------------------------------------------------+ * * * 82599+ Advanced Receive Descriptor (Write-Back) Format * * 63 48 47 32 31 30 21 20 17 16 4 3 0 * +------------------------------------------------------+ * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS | * |/ RTT / PCoE_PARAM | | | CNT | Type | Type | * |/ Flow Dir Flt ID | | | | | | * +------------------------------------------------------+ * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP | * +------------------------------------------------------+ * 63 48 47 32 31 20 19 0 */ for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; pr_info("------------------------------------\n"); pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); pr_info("------------------------------------\n"); pr_info("%s%s%s\n", "R [desc] [ PktBuf A0] ", "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", "<-- Adv Rx Read format"); pr_info("%s%s%s\n", "RWB[desc] [PcsmIpSHl PtRs] ", "[vl er S cks ln] ---------------- [bi->skb ] ", "<-- Adv Rx Write-Back format"); for (i = 0; i < rx_ring->count; i++) { const char *ring_desc; if (i == rx_ring->next_to_use) ring_desc = " NTU"; else if (i == rx_ring->next_to_clean) ring_desc = " NTC"; else ring_desc = ""; rx_buffer_info = &rx_ring->rx_buffer_info[i]; rx_desc = IXGBE_RX_DESC(rx_ring, i); u0 = (struct my_u0 *)rx_desc; if (rx_desc->wb.upper.length) { /* Descriptor Done */ pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n", i, le64_to_cpu((__force __le64)u0->a), le64_to_cpu((__force __le64)u0->b), rx_buffer_info->skb, ring_desc); } else { pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n", i, le64_to_cpu((__force __le64)u0->a), le64_to_cpu((__force __le64)u0->b), (u64)rx_buffer_info->dma, rx_buffer_info->skb, ring_desc); if (netif_msg_pktdata(adapter) && rx_buffer_info->dma) { print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, page_address(rx_buffer_info->page) + rx_buffer_info->page_offset, ixgbe_rx_bufsz(rx_ring), true); } } } } } static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) { u32 ctrl_ext; /* Let firmware take over control of h/w */ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); } static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) { u32 ctrl_ext; /* Let firmware know the driver has taken over */ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); } /** * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors * @adapter: pointer to adapter struct * @direction: 0 for Rx, 1 for Tx, -1 for other causes * @queue: queue to map the corresponding interrupt to * @msix_vector: the vector to map to the corresponding queue * */ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, u8 queue, u8 msix_vector) { u32 ivar, index; struct ixgbe_hw *hw = &adapter->hw; switch (hw->mac.type) { case ixgbe_mac_82598EB: msix_vector |= IXGBE_IVAR_ALLOC_VAL; if (direction == -1) direction = 0; index = (((direction * 64) + queue) >> 2) & 0x1F; ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 
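	/* On 82598 each 32-bit IVAR register appears to pack four 8-bit
	 * entries: (direction * 64) + queue selects the entry, >> 2 with
	 * the 0x1F mask selects one of 32 registers, and queue & 0x3
	 * below selects the byte lane that receives the vector number
	 * (with IXGBE_IVAR_ALLOC_VAL set).
	 */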
ivar &= ~(0xFF << (8 * (queue & 0x3))); ivar |= (msix_vector << (8 * (queue & 0x3))); IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; index = ((queue & 1) * 8); ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC); ivar &= ~(0xFF << index); ivar |= (msix_vector << index); IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar); break; } else { /* tx or rx causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; index = ((16 * (queue & 1)) + (8 * direction)); ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); ivar &= ~(0xFF << index); ivar |= (msix_vector << index); IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar); break; } default: break; } } void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask) { u32 mask; switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: mask = (IXGBE_EIMS_RTX_QUEUE & qmask); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: mask = (qmask & 0xFFFFFFFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); mask = (qmask >> 32); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); break; default: break; } } static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw_stats *hwstats = &adapter->stats; int i; u32 data; if ((hw->fc.current_mode != ixgbe_fc_full) && (hw->fc.current_mode != ixgbe_fc_rx_pause)) return; switch (hw->mac.type) { case ixgbe_mac_82598EB: data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); break; default: data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); } hwstats->lxoffrxc += data; /* refill credits (no tx hang) if we received xoff */ if (!data) return; for (i = 0; i < adapter->num_tx_queues; i++) clear_bit(__IXGBE_HANG_CHECK_ARMED, &adapter->tx_ring[i]->state); for (i = 0; i < adapter->num_xdp_queues; i++) clear_bit(__IXGBE_HANG_CHECK_ARMED, &adapter->xdp_ring[i]->state); } static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw_stats *hwstats = &adapter->stats; u32 xoff[8] = {0}; u8 tc; int i; bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; if (adapter->ixgbe_ieee_pfc) pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) { ixgbe_update_xoff_rx_lfc(adapter); return; } /* update stats for each tc, only valid with PFC enabled */ for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { u32 pxoffrxc; switch (hw->mac.type) { case ixgbe_mac_82598EB: pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); break; default: pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); } hwstats->pxoffrxc[i] += pxoffrxc; /* Get the TC for given UP */ tc = netdev_get_prio_tc_map(adapter->netdev, i); xoff[tc] += pxoffrxc; } /* disarm tx queues that have received xoff frames */ for (i = 0; i < adapter->num_tx_queues; i++) { struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; tc = tx_ring->dcb_tc; if (xoff[tc]) clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); } for (i = 0; i < adapter->num_xdp_queues; i++) { struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i]; tc = xdp_ring->dcb_tc; if (xoff[tc]) clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state); } } static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) { return ring->stats.packets; } static u64 ixgbe_get_tx_pending(struct ixgbe_ring 
*ring) { unsigned int head, tail; head = ring->next_to_clean; tail = ring->next_to_use; return ((head <= tail) ? tail : tail + ring->count) - head; } static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) { u32 tx_done = ixgbe_get_tx_completed(tx_ring); u32 tx_done_old = tx_ring->tx_stats.tx_done_old; u32 tx_pending = ixgbe_get_tx_pending(tx_ring); clear_check_for_tx_hang(tx_ring); /* * Check for a hung queue, but be thorough. This verifies * that a transmit has been completed since the previous * check AND there is at least one packet pending. The * ARMED bit is set to indicate a potential hang. The * bit is cleared if a pause frame is received to remove * false hang detection due to PFC or 802.3x frames. By * requiring this to fail twice we avoid races with * pfc clearing the ARMED bit and conditions where we * run the check_tx_hang logic with a transmit completion * pending but without time to complete it yet. */ if (tx_done_old == tx_done && tx_pending) /* make sure it is true for two checks in a row */ return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); /* update completed stats and continue */ tx_ring->tx_stats.tx_done_old = tx_done; /* reset the countdown */ clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); return false; } /** * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout * @adapter: driver private struct **/ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) { /* Do the reset outside of interrupt context */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) { set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); e_warn(drv, "initiating reset due to tx timeout\n"); ixgbe_service_event_schedule(adapter); } } /** * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate * @netdev: network interface device structure * @queue_index: Tx queue to set * @maxrate: desired maximum transmit bitrate **/ static int ixgbe_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 bcnrc_val = ixgbe_link_mbps(adapter); if (!maxrate) return 0; /* Calculate the rate factor values to set */ bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; bcnrc_val /= maxrate; /* clear everything but the rate factor */ bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | IXGBE_RTTBCNRC_RF_DEC_MASK; /* enable the rate scheduler */ bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index); IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); return 0; } /** * ixgbe_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: structure containing interrupt and ring information * @tx_ring: tx ring to clean * @napi_budget: Used to determine if we are in netpoll **/ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *tx_ring, int napi_budget) { struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0; unsigned int budget = q_vector->tx.work_limit; unsigned int i = tx_ring->next_to_clean; struct netdev_queue *txq; if (test_bit(__IXGBE_DOWN, &adapter->state)) return true; tx_buffer = &tx_ring->tx_buffer_info[i]; tx_desc = IXGBE_TX_DESC(tx_ring, i); i -= tx_ring->count; do { union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; /* if next_to_watch is not set then there is no work pending */ if (!eop_desc) break; /* prevent any other reads prior to eop_desc */ smp_rmb(); /* if DD is 
not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) break; /* clear next_to_watch to prevent false hangs */ tx_buffer->next_to_watch = NULL; /* update the statistics for this packet */ total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) total_ipsec++; /* free the skb */ if (ring_is_xdp(tx_ring)) xdp_return_frame(tx_buffer->xdpf); else napi_consume_skb(tx_buffer->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); /* clear tx_buffer data */ dma_unmap_len_set(tx_buffer, len, 0); /* unmap remaining buffers */ while (tx_desc != eop_desc) { tx_buffer++; tx_desc++; i++; if (unlikely(!i)) { i -= tx_ring->count; tx_buffer = tx_ring->tx_buffer_info; tx_desc = IXGBE_TX_DESC(tx_ring, 0); } /* unmap any remaining paged data */ if (dma_unmap_len(tx_buffer, len)) { dma_unmap_page(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); dma_unmap_len_set(tx_buffer, len, 0); } } /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; tx_desc++; i++; if (unlikely(!i)) { i -= tx_ring->count; tx_buffer = tx_ring->tx_buffer_info; tx_desc = IXGBE_TX_DESC(tx_ring, 0); } /* issue prefetch for next Tx descriptor */ prefetch(tx_desc); /* update budget accounting */ budget--; } while (likely(budget)); i += tx_ring->count; tx_ring->next_to_clean = i; u64_stats_update_begin(&tx_ring->syncp); tx_ring->stats.bytes += total_bytes; tx_ring->stats.packets += total_packets; u64_stats_update_end(&tx_ring->syncp); q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; adapter->tx_ipsec += total_ipsec; if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { /* schedule immediate reset if we believe we hung */ struct ixgbe_hw *hw = &adapter->hw; e_err(drv, "Detected Tx Unit Hang %s\n" " Tx Queue <%d>\n" " TDH, TDT <%x>, <%x>\n" " next_to_use <%x>\n" " next_to_clean <%x>\n" "tx_buffer_info[next_to_clean]\n" " time_stamp <%lx>\n" " jiffies <%lx>\n", ring_is_xdp(tx_ring) ? 
"(XDP)" : "", tx_ring->queue_index, IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), tx_ring->next_to_use, i, tx_ring->tx_buffer_info[i].time_stamp, jiffies); if (!ring_is_xdp(tx_ring)) netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); e_info(probe, "tx hang %d detected on queue %d, resetting adapter\n", adapter->tx_timeout_count + 1, tx_ring->queue_index); /* schedule immediate reset if we believe we hung */ ixgbe_tx_timeout_reset(adapter); /* the adapter is about to reset, no point in enabling stuff */ return true; } if (ring_is_xdp(tx_ring)) return !!budget; #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); if (!__netif_txq_completed_wake(txq, total_packets, total_bytes, ixgbe_desc_unused(tx_ring), TX_WAKE_THRESHOLD, !netif_carrier_ok(tx_ring->netdev) || test_bit(__IXGBE_DOWN, &adapter->state))) ++tx_ring->tx_stats.restart_queue; return !!budget; } #ifdef CONFIG_IXGBE_DCA static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring, int cpu) { struct ixgbe_hw *hw = &adapter->hw; u32 txctrl = 0; u16 reg_offset; if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) txctrl = dca3_get_tag(tx_ring->dev, cpu); switch (hw->mac.type) { case ixgbe_mac_82598EB: reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx); txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599; break; default: /* for unknown hardware do not write register */ return; } /* * We can enable relaxed ordering for reads, but not writes when * DCA is enabled. This is due to a known issue in some chipsets * which will cause the DCA tag to be cleared. */ txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN | IXGBE_DCA_TXCTRL_DATA_RRO_EN | IXGBE_DCA_TXCTRL_DESC_DCA_EN; IXGBE_WRITE_REG(hw, reg_offset, txctrl); } static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, int cpu) { struct ixgbe_hw *hw = &adapter->hw; u32 rxctrl = 0; u8 reg_idx = rx_ring->reg_idx; if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) rxctrl = dca3_get_tag(rx_ring->dev, cpu); switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599; break; default: break; } /* * We can enable relaxed ordering for reads, but not writes when * DCA is enabled. This is due to a known issue in some chipsets * which will cause the DCA tag to be cleared. 
*/ rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN | IXGBE_DCA_RXCTRL_DATA_DCA_EN | IXGBE_DCA_RXCTRL_DESC_DCA_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); } static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) { struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_ring *ring; int cpu = get_cpu(); if (q_vector->cpu == cpu) goto out_no_update; ixgbe_for_each_ring(ring, q_vector->tx) ixgbe_update_tx_dca(adapter, ring, cpu); ixgbe_for_each_ring(ring, q_vector->rx) ixgbe_update_rx_dca(adapter, ring, cpu); q_vector->cpu = cpu; out_no_update: put_cpu(); } static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) { int i; /* always use CB2 mode, difference is masked in the CB driver */ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, IXGBE_DCA_CTRL_DCA_MODE_CB2); else IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, IXGBE_DCA_CTRL_DCA_DISABLE); for (i = 0; i < adapter->num_q_vectors; i++) { adapter->q_vector[i]->cpu = -1; ixgbe_update_dca(adapter->q_vector[i]); } } static int __ixgbe_notify_dca(struct device *dev, void *data) { struct ixgbe_adapter *adapter = dev_get_drvdata(dev); unsigned long event = *(unsigned long *)data; if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) return 0; switch (event) { case DCA_PROVIDER_ADD: /* if we're already enabled, don't do it again */ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) break; if (dca_add_requester(dev) == 0) { adapter->flags |= IXGBE_FLAG_DCA_ENABLED; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, IXGBE_DCA_CTRL_DCA_MODE_CB2); break; } fallthrough; /* DCA is disabled. */ case DCA_PROVIDER_REMOVE: if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { dca_remove_requester(dev); adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, IXGBE_DCA_CTRL_DCA_DISABLE); } break; } return 0; } #endif /* CONFIG_IXGBE_DCA */ #define IXGBE_RSS_L4_TYPES_MASK \ ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { u16 rss_type; if (!(ring->netdev->features & NETIF_F_RXHASH)) return; rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & IXGBE_RXDADV_RSSTYPE_MASK; if (!rss_type) return; skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } #ifdef IXGBE_FCOE /** * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type * @ring: structure containing ring specific data * @rx_desc: advanced rx descriptor * * Returns : true if it is FCoE pkt */ static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring, union ixgbe_adv_rx_desc *rx_desc) { __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; return test_bit(__IXGBE_RX_FCOE, &ring->state) && ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) == (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE << IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); } #endif /* IXGBE_FCOE */ /** * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: structure containing ring specific data * @rx_desc: current Rx descriptor being processed * @skb: skb currently being received and modified **/ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; bool encap_pkt = false; skb_checksum_none_assert(skb); /* Rx csum disabled */ if (!(ring->netdev->features & NETIF_F_RXCSUM)) return; /* check for VXLAN and Geneve packets */ if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) { encap_pkt = true; skb->encapsulation = 1; } /* if IP and error */ if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { ring->rx_stats.csum_err++; return; } if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS)) return; if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { /* * 82599 errata, UDP frames with a 0 checksum can be marked as * checksum errors. */ if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) && test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state)) return; ring->rx_stats.csum_err++; return; } /* It must be a TCP or UDP packet with a valid checksum */ skb->ip_summed = CHECKSUM_UNNECESSARY; if (encap_pkt) { if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS)) return; if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) { skb->ip_summed = CHECKSUM_NONE; return; } /* If we checked the outer header let the stack know */ skb->csum_level = 1; } } static unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring) { return ring_uses_build_skb(rx_ring) ? 
IXGBE_SKB_PAD : 0; } static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *bi) { struct page *page = bi->page; dma_addr_t dma; /* since we are recycling buffers we should seldom need to alloc */ if (likely(page)) return true; /* alloc new page for storage */ page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_rx_page_failed++; return false; } /* map page for use */ dma = dma_map_page_attrs(rx_ring->dev, page, 0, ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR); /* * if mapping failed free memory back to system since * there isn't much point in holding memory we can't use */ if (dma_mapping_error(rx_ring->dev, dma)) { __free_pages(page, ixgbe_rx_pg_order(rx_ring)); rx_ring->rx_stats.alloc_rx_page_failed++; return false; } bi->dma = dma; bi->page = page; bi->page_offset = rx_ring->rx_offset; page_ref_add(page, USHRT_MAX - 1); bi->pagecnt_bias = USHRT_MAX; rx_ring->rx_stats.alloc_rx_page++; return true; } /** * ixgbe_alloc_rx_buffers - Replace used receive buffers * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace **/ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) { union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *bi; u16 i = rx_ring->next_to_use; u16 bufsz; /* nothing to do */ if (!cleaned_count) return; rx_desc = IXGBE_RX_DESC(rx_ring, i); bi = &rx_ring->rx_buffer_info[i]; i -= rx_ring->count; bufsz = ixgbe_rx_bufsz(rx_ring); do { if (!ixgbe_alloc_mapped_page(rx_ring, bi)) break; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, bi->dma, bi->page_offset, bufsz, DMA_FROM_DEVICE); /* * Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); rx_desc++; bi++; i++; if (unlikely(!i)) { rx_desc = IXGBE_RX_DESC(rx_ring, 0); bi = rx_ring->rx_buffer_info; i -= rx_ring->count; } /* clear the length for the next_to_use descriptor */ rx_desc->wb.upper.length = 0; cleaned_count--; } while (cleaned_count); i += rx_ring->count; if (rx_ring->next_to_use != i) { rx_ring->next_to_use = i; /* update next to alloc since we have filled the ring */ rx_ring->next_to_alloc = i; /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). 
*/ wmb(); writel(i, rx_ring->tail); } } static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, struct sk_buff *skb) { u16 hdr_len = skb_headlen(skb); /* set gso_size to avoid messing up TCP MSS */ skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), IXGBE_CB(skb)->append_cnt); skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; } static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { /* if append_cnt is 0 then frame is not RSC */ if (!IXGBE_CB(skb)->append_cnt) return; rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; rx_ring->rx_stats.rsc_flush++; ixgbe_set_rsc_gso_size(rx_ring, skb); /* gso_size is computed using append_cnt so always clear it last */ IXGBE_CB(skb)->append_cnt = 0; } /** * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated * * This function checks the ring, descriptor, and packet information in * order to populate the hash, checksum, VLAN, timestamp, protocol, and * other fields within the skb. **/ void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { struct net_device *dev = rx_ring->netdev; u32 flags = rx_ring->q_vector->adapter->flags; ixgbe_update_rsc_stats(rx_ring, skb); ixgbe_rx_hash(rx_ring, rx_desc, skb); ixgbe_rx_checksum(rx_ring, rx_desc, skb); if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED)) ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP)) ixgbe_ipsec_rx(rx_ring, rx_desc, skb); /* record Rx queue, or update MACVLAN statistics */ if (netif_is_ixgbe(dev)) skb_record_rx_queue(skb, rx_ring->queue_index); else macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true, false); skb->protocol = eth_type_trans(skb, dev); } void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, struct sk_buff *skb) { napi_gro_receive(&q_vector->napi, skb); } /** * ixgbe_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed * @rx_desc: Rx descriptor for current buffer * @skb: Current socket buffer containing buffer in progress * * This function updates next to clean. If the buffer is an EOP buffer * this function exits returning false, otherwise it will place the * sk_buff in the next buffer to be chained and return true indicating * that this is in fact a non-EOP buffer. **/ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { u32 ntc = rx_ring->next_to_clean + 1; /* fetch, update, and store next to clean */ ntc = (ntc < rx_ring->count) ? 
ntc : 0; rx_ring->next_to_clean = ntc; prefetch(IXGBE_RX_DESC(rx_ring, ntc)); /* update RSC append count if present */ if (ring_is_rsc_enabled(rx_ring)) { __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK); if (unlikely(rsc_enabled)) { u32 rsc_cnt = le32_to_cpu(rsc_enabled); rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT; IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; /* update ntc based on RSC value */ ntc = le32_to_cpu(rx_desc->wb.upper.status_error); ntc &= IXGBE_RXDADV_NEXTP_MASK; ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; } } /* if we are the last buffer then there is nothing else to do */ if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) return false; /* place skb in next buffer to be received */ rx_ring->rx_buffer_info[ntc].skb = skb; rx_ring->rx_stats.non_eop_descs++; return true; } /** * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail * @rx_ring: rx descriptor ring packet is being transacted on * @skb: pointer to current skb being adjusted * * This function is an ixgbe specific version of __pskb_pull_tail. The * main difference between this version and the original function is that * this function can make several assumptions about the state of things * that allow for significant optimizations versus the standard function. * As a result we can do things like drop a frag and maintain an accurate * truesize for the skb. */ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned char *va; unsigned int pull_len; /* * it is valid to use page_address instead of kmap since we are * working with pages allocated out of the lomem pool per * alloc_page(GFP_ATOMIC) */ va = skb_frag_address(frag); /* * we need the header to contain the greater of either ETH_HLEN or * 60 bytes if the skb->len is less than 60 for skb_pad. */ pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); /* update all of the pointers */ skb_frag_size_sub(frag, pull_len); skb_frag_off_add(frag, pull_len); skb->data_len -= pull_len; skb->tail += pull_len; } /** * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB * @rx_ring: rx descriptor ring packet is being transacted on * @skb: pointer to current skb being updated * * This function provides a basic DMA sync up for the first fragment of an * skb. The reason for doing this is that the first fragment cannot be * unmapped until we have reached the end of packet descriptor for a buffer * chain. */ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { if (ring_uses_build_skb(rx_ring)) { unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1; unsigned long offset = (unsigned long)(skb->data) & mask; dma_sync_single_range_for_cpu(rx_ring->dev, IXGBE_CB(skb)->dma, offset, skb_headlen(skb), DMA_FROM_DEVICE); } else { skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; dma_sync_single_range_for_cpu(rx_ring->dev, IXGBE_CB(skb)->dma, skb_frag_off(frag), skb_frag_size(frag), DMA_FROM_DEVICE); } /* If the page was released, just unmap it. 
*/ if (unlikely(IXGBE_CB(skb)->page_released)) { dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR); } } /** * ixgbe_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being fixed * * Check if the skb is valid in the XDP case it will be an error pointer. * Return true in this case to abort processing and advance to next * descriptor. * * Check for corrupted packet headers caused by senders on the local L2 * embedded NIC switch not setting up their Tx Descriptors right. These * should be very rare. * * Also address the case where we are pulling data in on pages only * and as such no data is present in the skb header. * * In addition if skb is not at least 60 bytes we need to pad it so that * it is large enough to qualify as a valid Ethernet frame. * * Returns true if an error was encountered and skb was freed. **/ bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { struct net_device *netdev = rx_ring->netdev; /* XDP packets use error pointer so abort at this point */ if (IS_ERR(skb)) return true; /* Verify netdev is present, and that packet does not have any * errors that would be unacceptable to the netdev. */ if (!netdev || (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FRAME_ERR_MASK) && !(netdev->features & NETIF_F_RXALL)))) { dev_kfree_skb_any(skb); return true; } /* place header in linear portion of buffer */ if (!skb_headlen(skb)) ixgbe_pull_tail(rx_ring, skb); #ifdef IXGBE_FCOE /* do not attempt to pad FCoE Frames as this will disrupt DDP */ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) return false; #endif /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; return false; } /** * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: rx descriptor ring to store buffers on * @old_buff: donor buffer to have page reused * * Synchronizes page for reuse by the adapter **/ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *old_buff) { struct ixgbe_rx_buffer *new_buff; u16 nta = rx_ring->next_to_alloc; new_buff = &rx_ring->rx_buffer_info[nta]; /* update, and store next to alloc */ nta++; rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; /* Transfer page from old buffer to new buffer. * Move each member individually to avoid possible store * forwarding stalls and unnecessary copy of skb. */ new_buff->dma = old_buff->dma; new_buff->page = old_buff->page; new_buff->page_offset = old_buff->page_offset; new_buff->pagecnt_bias = old_buff->pagecnt_bias; } static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer, int rx_buffer_pgcnt) { unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; struct page *page = rx_buffer->page; /* avoid re-using remote and pfmemalloc pages */ if (!dev_page_is_reusable(page)) return false; #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) return false; #else /* The last offset is a bit aggressive in that we assume the * worst case of FCoE being enabled and using a 3K buffer. * However this should have minimal impact as the 1K extra is * still less than one buffer in size. 
*/ #define IXGBE_LAST_OFFSET \ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K) if (rx_buffer->page_offset > IXGBE_LAST_OFFSET) return false; #endif /* If we have drained the page fragment pool we need to update * the pagecnt_bias and page count so that we fully restock the * number of references the driver holds. */ if (unlikely(pagecnt_bias == 1)) { page_ref_add(page, USHRT_MAX - 1); rx_buffer->pagecnt_bias = USHRT_MAX; } return true; } /** * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add * @skb: sk_buff to place the data into * @size: size of data in rx_buffer * * This function will add the data contained in rx_buffer->page to the skb. * This is done either through a direct copy if the data in the buffer is * less than the skb header size, otherwise it will just attach the page as * a frag to the skb. * * The function will then update the page offset if necessary and return * true if the buffer can be reused by the adapter. **/ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, struct sk_buff *skb, unsigned int size) { #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = rx_ring->rx_offset ? SKB_DATA_ALIGN(rx_ring->rx_offset + size) : SKB_DATA_ALIGN(size); #endif skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, size, truesize); #if (PAGE_SIZE < 8192) rx_buffer->page_offset ^= truesize; #else rx_buffer->page_offset += truesize; #endif } static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff **skb, const unsigned int size, int *rx_buffer_pgcnt) { struct ixgbe_rx_buffer *rx_buffer; rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; *rx_buffer_pgcnt = #if (PAGE_SIZE < 8192) page_count(rx_buffer->page); #else 0; #endif prefetchw(rx_buffer->page); *skb = rx_buffer->skb; /* Delay unmapping of the first packet. It carries the header * information, HW may still access the header after the writeback. 
* Only unmap it when EOP is reached */ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) { if (!*skb) goto skip_sync; } else { if (*skb) ixgbe_dma_sync_frag(rx_ring, *skb); } /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, size, DMA_FROM_DEVICE); skip_sync: rx_buffer->pagecnt_bias--; return rx_buffer; } static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, struct sk_buff *skb, int rx_buffer_pgcnt) { if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { /* hand second half of page back to the ring */ ixgbe_reuse_rx_page(rx_ring, rx_buffer); } else { if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) { /* the page has been released from the ring */ IXGBE_CB(skb)->page_released = true; } else { /* we are not reusing the buffer so unmap it */ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR); } __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); } /* clear contents of rx_buffer */ rx_buffer->page = NULL; rx_buffer->skb = NULL; } static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, struct xdp_buff *xdp, union ixgbe_adv_rx_desc *rx_desc) { unsigned int size = xdp->data_end - xdp->data; #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start); #endif struct sk_buff *skb; /* prefetch first cache line of first page */ net_prefetch(xdp->data); /* Note, we get here by enabling legacy-rx via: * * ethtool --set-priv-flags <dev> legacy-rx on * * In this mode, we currently get 0 extra XDP headroom as * opposed to having legacy-rx off, where we process XDP * packets going to stack via ixgbe_build_skb(). The latter * provides us currently with 192 bytes of headroom. * * For ixgbe_construct_skb() mode it means that the * xdp->data_meta will always point to xdp->data, since * the helper cannot expand the head. Should this ever * change in future for legacy-rx mode on, then lets also * add xdp->data_meta handling here. */ /* allocate a skb to store the frags */ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE); if (unlikely(!skb)) return NULL; if (size > IXGBE_RX_HDR_SIZE) { if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) IXGBE_CB(skb)->dma = rx_buffer->dma; skb_add_rx_frag(skb, 0, rx_buffer->page, xdp->data - page_address(rx_buffer->page), size, truesize); #if (PAGE_SIZE < 8192) rx_buffer->page_offset ^= truesize; #else rx_buffer->page_offset += truesize; #endif } else { memcpy(__skb_put(skb, size), xdp->data, ALIGN(size, sizeof(long))); rx_buffer->pagecnt_bias++; } return skb; } static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, struct xdp_buff *xdp, union ixgbe_adv_rx_desc *rx_desc) { unsigned int metasize = xdp->data - xdp->data_meta; #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start); #endif struct sk_buff *skb; /* Prefetch first cache line of first page. If xdp->data_meta * is unused, this points extactly as xdp->data, otherwise we * likely have a consumer accessing first few bytes of meta * data, and then actual data. 
*/ net_prefetch(xdp->data_meta); /* build an skb to around the page buffer */ skb = napi_build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; /* update pointers within the skb to store the data */ skb_reserve(skb, xdp->data - xdp->data_hard_start); __skb_put(skb, xdp->data_end - xdp->data); if (metasize) skb_metadata_set(skb, metasize); /* record DMA address if this is the start of a chain of buffers */ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) IXGBE_CB(skb)->dma = rx_buffer->dma; /* update buffer offset */ #if (PAGE_SIZE < 8192) rx_buffer->page_offset ^= truesize; #else rx_buffer->page_offset += truesize; #endif return skb; } static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, struct xdp_buff *xdp) { int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; struct ixgbe_ring *ring; struct xdp_frame *xdpf; u32 act; xdp_prog = READ_ONCE(rx_ring->xdp_prog); if (!xdp_prog) goto xdp_out; prefetchw(xdp->data_hard_start); /* xdp_frame write */ act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: break; case XDP_TX: xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) goto out_failure; ring = ixgbe_determine_xdp_ring(adapter); if (static_branch_unlikely(&ixgbe_xdp_locking_key)) spin_lock(&ring->tx_lock); result = ixgbe_xmit_xdp_ring(ring, xdpf); if (static_branch_unlikely(&ixgbe_xdp_locking_key)) spin_unlock(&ring->tx_lock); if (result == IXGBE_XDP_CONSUMED) goto out_failure; break; case XDP_REDIRECT: err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); if (err) goto out_failure; result = IXGBE_XDP_REDIR; break; default: bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); fallthrough; /* handle aborts by dropping packet */ case XDP_DROP: result = IXGBE_XDP_CONSUMED; break; } xdp_out: return ERR_PTR(-result); } static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring, unsigned int size) { unsigned int truesize; #if (PAGE_SIZE < 8192) truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ #else truesize = rx_ring->rx_offset ? SKB_DATA_ALIGN(rx_ring->rx_offset + size) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : SKB_DATA_ALIGN(size); #endif return truesize; } static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, unsigned int size) { unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size); #if (PAGE_SIZE < 8192) rx_buffer->page_offset ^= truesize; #else rx_buffer->page_offset += truesize; #endif } /** * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @q_vector: structure containing interrupt and ring information * @rx_ring: rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process * * This function provides a "bounce buffer" approach to Rx interrupt * processing. The advantage to this is that on systems that have * expensive overhead for IOMMU access this provides a means of avoiding * it by maintaining the mapping of the page to the syste. 
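* (pages stay DMA mapped across frames and are recycled through the
* pagecnt_bias reference counting above instead of being unmapped and
* re-allocated for every packet)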
* * Returns amount of work completed **/ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *rx_ring, const int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0; struct ixgbe_adapter *adapter = q_vector->adapter; #ifdef IXGBE_FCOE int ddp_bytes; unsigned int mss = 0; #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); unsigned int offset = rx_ring->rx_offset; unsigned int xdp_xmit = 0; struct xdp_buff xdp; /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ #if (PAGE_SIZE < 8192) frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0); #endif xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); while (likely(total_rx_packets < budget)) { union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *rx_buffer; struct sk_buff *skb; int rx_buffer_pgcnt; unsigned int size; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); cleaned_count = 0; } rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); size = le16_to_cpu(rx_desc->wb.upper.length); if (!size) break; /* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc until we know the * descriptor has been written back */ dma_rmb(); rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt); /* retrieve a buffer from the ring */ if (!skb) { unsigned char *hard_start; hard_start = page_address(rx_buffer->page) + rx_buffer->page_offset - offset; xdp_prepare_buff(&xdp, hard_start, offset, size, true); xdp_buff_clear_frags_flag(&xdp); #if (PAGE_SIZE > 4096) /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size); #endif skb = ixgbe_run_xdp(adapter, rx_ring, &xdp); } if (IS_ERR(skb)) { unsigned int xdp_res = -PTR_ERR(skb); if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) { xdp_xmit |= xdp_res; ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size); } else { rx_buffer->pagecnt_bias++; } total_rx_packets++; total_rx_bytes += size; } else if (skb) { ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); } else if (ring_uses_build_skb(rx_ring)) { skb = ixgbe_build_skb(rx_ring, rx_buffer, &xdp, rx_desc); } else { skb = ixgbe_construct_skb(rx_ring, rx_buffer, &xdp, rx_desc); } /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_rx_buff_failed++; rx_buffer->pagecnt_bias++; break; } ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt); cleaned_count++; /* place incomplete frames back on ring for completion */ if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) continue; /* verify the packet layout is correct */ if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) continue; /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; /* populate checksum, timestamp, VLAN, and protocol */ ixgbe_process_skb_fields(rx_ring, rx_desc, skb); #ifdef IXGBE_FCOE /* if ddp, not passing to ULD unless for FCP_RSP or error */ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); /* include DDPed FCoE data */ if (ddp_bytes > 0) { if (!mss) { mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) - sizeof(struct fc_frame_header) - sizeof(struct fcoe_crc_eof); if (mss > 512) mss &= ~511; } total_rx_bytes += ddp_bytes; total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss); } if (!ddp_bytes) { dev_kfree_skb_any(skb); continue; } } #endif /* IXGBE_FCOE */ ixgbe_rx_skb(q_vector, skb); /* update budget accounting */ 
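/* XDP verdicts were already counted in the IS_ERR(skb) branch above;
* this accounts for one more frame handed up the stack.
*/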
total_rx_packets++; } if (xdp_xmit & IXGBE_XDP_REDIR) xdp_do_flush_map(); if (xdp_xmit & IXGBE_XDP_TX) { struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter); ixgbe_xdp_ring_update_tail_locked(ring); } u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; u64_stats_update_end(&rx_ring->syncp); q_vector->rx.total_packets += total_rx_packets; q_vector->rx.total_bytes += total_rx_bytes; return total_rx_packets; } /** * ixgbe_configure_msix - Configure MSI-X hardware * @adapter: board private structure * * ixgbe_configure_msix sets up the hardware to properly generate MSI-X * interrupts. **/ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) { struct ixgbe_q_vector *q_vector; int v_idx; u32 mask; /* Populate MSIX to EITR Select */ if (adapter->num_vfs > 32) { u32 eitrsel = BIT(adapter->num_vfs - 32) - 1; IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); } /* * Populate the IVAR table and set the ITR values to the * corresponding register. */ for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { struct ixgbe_ring *ring; q_vector = adapter->q_vector[v_idx]; ixgbe_for_each_ring(ring, q_vector->rx) ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); ixgbe_for_each_ring(ring, q_vector->tx) ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); ixgbe_write_eitr(q_vector); } switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: ixgbe_set_ivar(adapter, -1, 1, v_idx); break; default: break; } IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); /* set up to autoclear timer, and the vectors */ mask = IXGBE_EIMS_ENABLE_MASK; mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_MAILBOX | IXGBE_EIMS_LSC); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); } /** * ixgbe_update_itr - update the dynamic ITR value based on statistics * @q_vector: structure containing interrupt and ring information * @ring_container: structure containing ring performance data * * Stores a new ITR value based on packets and byte * counts during the last interrupt. The advantage of per interrupt * computation is faster updates and more accurate ITR for the current * traffic pattern. Constants in this function were computed * based on theoretical maximum wire speed and thresholds were set based * on testing data as well as attempting to minimize response time * while increasing bulk throughput. **/ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, struct ixgbe_ring_container *ring_container) { unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS | IXGBE_ITR_ADAPTIVE_LATENCY; unsigned int avg_wire_size, packets, bytes; unsigned long next_update = jiffies; /* If we don't have any rings just leave ourselves set for maximum * possible latency so we take ourselves out of the equation. */ if (!ring_container->ring) return; /* If we didn't update within up to 1 - 2 jiffies we can assume * that either packets are coming in so slow there hasn't been * any work, or that there is so much work that NAPI is dealing * with interrupt moderation and we don't need to do anything. */ if (time_after(next_update, ring_container->next_update)) goto clear_counts; packets = ring_container->total_packets; /* We have no packets to actually measure against. 
This means * either one of the other queues on this vector is active or * we are a Tx queue doing TSO with too high of an interrupt rate. * * When this occurs just tick up our delay by the minimum value * and hope that this extra delay will prevent us from being called * without any work on our queue. */ if (!packets) { itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC; if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS) itr = IXGBE_ITR_ADAPTIVE_MAX_USECS; itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY; goto clear_counts; } bytes = ring_container->total_bytes; /* If packets are less than 4 or bytes are less than 9000 assume * insufficient data to use bulk rate limiting approach. We are * likely latency driven. */ if (packets < 4 && bytes < 9000) { itr = IXGBE_ITR_ADAPTIVE_LATENCY; goto adjust_by_size; } /* Between 4 and 48 we can assume that our current interrupt delay * is only slightly too low. As such we should increase it by a small * fixed amount. */ if (packets < 48) { itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC; if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS) itr = IXGBE_ITR_ADAPTIVE_MAX_USECS; goto clear_counts; } /* Between 48 and 96 is our "goldilocks" zone where we are working * out "just right". Just report that our current ITR is good for us. */ if (packets < 96) { itr = q_vector->itr >> 2; goto clear_counts; } /* If packet count is 96 or greater we are likely looking at a slight * overrun of the delay we want. Try halving our delay to see if that * will cut the number of packets in half per interrupt. */ if (packets < 256) { itr = q_vector->itr >> 3; if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS) itr = IXGBE_ITR_ADAPTIVE_MIN_USECS; goto clear_counts; } /* The paths below assume we are dealing with a bulk ITR since number * of packets is 256 or greater. We are just going to have to compute * a value and try to bring the count under control, though for smaller * packet sizes there isn't much we can do as NAPI polling will likely * be kicking in sooner rather than later. */ itr = IXGBE_ITR_ADAPTIVE_BULK; adjust_by_size: /* If packet counts are 256 or greater we can assume we have a gross * overestimation of what the rate should be. Instead of trying to fine * tune it just use the formula below to try and dial in an exact value * give the current packet size of the frame. */ avg_wire_size = bytes / packets; /* The following is a crude approximation of: * wmem_default / (size + overhead) = desired_pkts_per_int * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value * * Assuming wmem_default is 212992 and overhead is 640 bytes per * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the * formula down to * * (170 * (size + 24)) / (size + 640) = ITR * * We first do some math on the packet size and then finally bitshift * by 8 after rounding up. We also have to account for PCIe link speed * difference as ITR scales based on this. 
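* For example, an average frame of 1500 bytes gives
* (170 * (1500 + 24)) / (1500 + 640) ~= 121 from the exact formula, while the
* piecewise approximation below computes 1500 * 5 + 22420 = 29920, which
* divided by 256 is ~117 usecs, i.e. roughly 8.5K interrupts per second at
* 10G and consistent with the 8K plateau used for the largest frames.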
*/ if (avg_wire_size <= 60) { /* Start at 50k ints/sec */ avg_wire_size = 5120; } else if (avg_wire_size <= 316) { /* 50K ints/sec to 16K ints/sec */ avg_wire_size *= 40; avg_wire_size += 2720; } else if (avg_wire_size <= 1084) { /* 16K ints/sec to 9.2K ints/sec */ avg_wire_size *= 15; avg_wire_size += 11452; } else if (avg_wire_size < 1968) { /* 9.2K ints/sec to 8K ints/sec */ avg_wire_size *= 5; avg_wire_size += 22420; } else { /* plateau at a limit of 8K ints/sec */ avg_wire_size = 32256; } /* If we are in low latency mode half our delay which doubles the rate * to somewhere between 100K to 16K ints/sec */ if (itr & IXGBE_ITR_ADAPTIVE_LATENCY) avg_wire_size >>= 1; /* Resultant value is 256 times larger than it needs to be. This * gives us room to adjust the value as needed to either increase * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. * * Use addition as we have already recorded the new latency flag * for the ITR value. */ switch (q_vector->adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: case IXGBE_LINK_SPEED_100_FULL: default: itr += DIV_ROUND_UP(avg_wire_size, IXGBE_ITR_ADAPTIVE_MIN_INC * 256) * IXGBE_ITR_ADAPTIVE_MIN_INC; break; case IXGBE_LINK_SPEED_2_5GB_FULL: case IXGBE_LINK_SPEED_1GB_FULL: case IXGBE_LINK_SPEED_10_FULL: if (avg_wire_size > 8064) avg_wire_size = 8064; itr += DIV_ROUND_UP(avg_wire_size, IXGBE_ITR_ADAPTIVE_MIN_INC * 64) * IXGBE_ITR_ADAPTIVE_MIN_INC; break; } clear_counts: /* write back value */ ring_container->itr = itr; /* next update should occur within next jiffy */ ring_container->next_update = next_update + 1; ring_container->total_bytes = 0; ring_container->total_packets = 0; } /** * ixgbe_write_eitr - write EITR register in hardware specific way * @q_vector: structure containing interrupt and ring information * * This function is made to be called by ethtool and by the driver * when it needs to update EITR registers at runtime. Hardware * specific quirks/differences are taken care of here. 
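* (82598 needs the ITR value mirrored into both 16 bit halves of EITR to
* reset the counter, while newer MACs set the WDIS bit so the write neither
* clears the timer bits nor causes an immediate interrupt assertion)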
*/ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) { struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_hw *hw = &adapter->hw; int v_idx = q_vector->v_idx; u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: /* must write high and low 16 bits to reset counter */ itr_reg |= (itr_reg << 16); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: /* * set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt */ itr_reg |= IXGBE_EITR_CNT_WDIS; break; default: break; } IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); } static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) { u32 new_itr; ixgbe_update_itr(q_vector, &q_vector->tx); ixgbe_update_itr(q_vector, &q_vector->rx); /* use the smallest value of new ITR delay calculations */ new_itr = min(q_vector->rx.itr, q_vector->tx.itr); /* Clear latency flag if set, shift into correct position */ new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY; new_itr <<= 2; if (new_itr != q_vector->itr) { /* save the algorithm value here */ q_vector->itr = new_itr; ixgbe_write_eitr(q_vector); } } /** * ixgbe_check_overtemp_subtask - check for over temperature * @adapter: pointer to adapter **/ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 eicr = adapter->interrupt_event; s32 rc; if (test_bit(__IXGBE_DOWN, &adapter->state)) return; if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) return; adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; switch (hw->device_id) { case IXGBE_DEV_ID_82599_T3_LOM: /* * Since the warning interrupt is for both ports * we don't have to check if: * - This interrupt wasn't for our port. 
* - We may have missed the interrupt so always have to * check if we got a LSC */ if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) && !(eicr & IXGBE_EICR_LSC)) return; if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { u32 speed; bool link_up = false; hw->mac.ops.check_link(hw, &speed, &link_up, false); if (link_up) return; } /* Check if this is not due to overtemp */ if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) return; break; case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: rc = hw->phy.ops.check_overtemp(hw); if (rc != IXGBE_ERR_OVERTEMP) return; break; default: if (adapter->hw.mac.type >= ixgbe_mac_X540) return; if (!(eicr & IXGBE_EICR_GPI_SDP0(hw))) return; break; } e_crit(drv, "%s\n", ixgbe_overheat_msg); adapter->interrupt_event = 0; } static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) { struct ixgbe_hw *hw = &adapter->hw; if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && (eicr & IXGBE_EICR_GPI_SDP1(hw))) { e_crit(probe, "Fan has stopped, replace the adapter\n"); /* write to clear the interrupt */ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); } } static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) { struct ixgbe_hw *hw = &adapter->hw; if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) return; switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: /* * Need to check link state so complete overtemp check * on service task */ if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) || (eicr & IXGBE_EICR_LSC)) && (!test_bit(__IXGBE_DOWN, &adapter->state))) { adapter->interrupt_event = eicr; adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; ixgbe_service_event_schedule(adapter); return; } return; case ixgbe_mac_x550em_a: if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) { adapter->interrupt_event = eicr; adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; ixgbe_service_event_schedule(adapter); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, IXGBE_EICR_GPI_SDP0_X550EM_a); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X550EM_a); } return; case ixgbe_mac_X550: case ixgbe_mac_X540: if (!(eicr & IXGBE_EICR_TS)) return; break; default: return; } e_crit(drv, "%s\n", ixgbe_overheat_msg); } static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) { switch (hw->mac.type) { case ixgbe_mac_82598EB: if (hw->phy.type == ixgbe_phy_nl) return true; return false; case ixgbe_mac_82599EB: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: switch (hw->mac.ops.get_media_type(hw)) { case ixgbe_media_type_fiber: case ixgbe_media_type_fiber_qsfp: return true; default: return false; } default: return false; } } static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) { struct ixgbe_hw *hw = &adapter->hw; u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw); if (!ixgbe_is_sfp(hw)) return; /* Later MAC's use different SDP */ if (hw->mac.type >= ixgbe_mac_X540) eicr_mask = IXGBE_EICR_GPI_SDP0_X540; if (eicr & eicr_mask) { /* Clear the interrupt */ IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); if (!test_bit(__IXGBE_DOWN, &adapter->state)) { adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; adapter->sfp_poll_time = 0; ixgbe_service_event_schedule(adapter); } } if (adapter->hw.mac.type == ixgbe_mac_82599EB && (eicr & IXGBE_EICR_GPI_SDP1(hw))) { /* Clear the interrupt */ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); if (!test_bit(__IXGBE_DOWN, &adapter->state)) { adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; ixgbe_service_event_schedule(adapter); } } } static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) { struct 
ixgbe_hw *hw = &adapter->hw; adapter->lsc_int++; adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; adapter->link_check_timeout = jiffies; if (!test_bit(__IXGBE_DOWN, &adapter->state)) { IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); IXGBE_WRITE_FLUSH(hw); ixgbe_service_event_schedule(adapter); } } static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, u64 qmask) { u32 mask; struct ixgbe_hw *hw = &adapter->hw; switch (hw->mac.type) { case ixgbe_mac_82598EB: mask = (IXGBE_EIMS_RTX_QUEUE & qmask); IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); mask = (qmask >> 32); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); break; default: break; } /* skip the flush */ } /** * ixgbe_irq_enable - Enable default interrupt generation settings * @adapter: board private structure * @queues: enable irqs for queues * @flush: flush register write **/ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, bool flush) { struct ixgbe_hw *hw = &adapter->hw; u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); /* don't reenable LSC while waiting for link */ if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) mask &= ~IXGBE_EIMS_LSC; if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: mask |= IXGBE_EIMS_GPI_SDP0(hw); break; case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: mask |= IXGBE_EIMS_TS; break; default: break; } if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) mask |= IXGBE_EIMS_GPI_SDP1(hw); switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: mask |= IXGBE_EIMS_GPI_SDP1(hw); mask |= IXGBE_EIMS_GPI_SDP2(hw); fallthrough; case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP || adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw); if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) mask |= IXGBE_EICR_GPI_SDP0_X540; mask |= IXGBE_EIMS_ECC; mask |= IXGBE_EIMS_MAILBOX; break; default: break; } if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) mask |= IXGBE_EIMS_FLOW_DIR; IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); if (queues) ixgbe_irq_enable_queues(adapter, ~0); if (flush) IXGBE_WRITE_FLUSH(&adapter->hw); } static irqreturn_t ixgbe_msix_other(int irq, void *data) { struct ixgbe_adapter *adapter = data; struct ixgbe_hw *hw = &adapter->hw; u32 eicr; /* * Workaround for Silicon errata. Use clear-by-write instead * of clear-by-read. Reading with EICS will return the * interrupt causes without clearing, which later be done * with the write to EICR. */ eicr = IXGBE_READ_REG(hw, IXGBE_EICS); /* The lower 16bits of the EICR register are for the queue interrupts * which should be masked here in order to not accidentally clear them if * the bits are high when ixgbe_msix_other is called. 
There is a race * condition otherwise which results in possible performance loss * especially if the ixgbe_msix_other interrupt is triggering * consistently (as it would when PPS is turned on for the X540 device) */ eicr &= 0xFFFF0000; IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); if (eicr & IXGBE_EICR_LSC) ixgbe_check_lsc(adapter); if (eicr & IXGBE_EICR_MAILBOX) ixgbe_msg_task(adapter); switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: if (hw->phy.type == ixgbe_phy_x550em_ext_t && (eicr & IXGBE_EICR_GPI_SDP0_X540)) { adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; ixgbe_service_event_schedule(adapter); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); } if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); ixgbe_service_event_schedule(adapter); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); } /* Handle Flow Director Full threshold interrupt */ if (eicr & IXGBE_EICR_FLOW_DIR) { int reinit_count = 0; int i; for (i = 0; i < adapter->num_tx_queues; i++) { struct ixgbe_ring *ring = adapter->tx_ring[i]; if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state)) reinit_count++; } if (reinit_count) { /* no more flow director interrupts until after init */ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; ixgbe_service_event_schedule(adapter); } } ixgbe_check_sfp_event(adapter, eicr); ixgbe_check_overtemp_event(adapter, eicr); break; default: break; } ixgbe_check_fan_failure(adapter, eicr); if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) ixgbe_ptp_check_pps_event(adapter); /* re-enable the original interrupt state, no lsc, no queues */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ixgbe_irq_enable(adapter, false, false); return IRQ_HANDLED; } static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) { struct ixgbe_q_vector *q_vector = data; /* EIAM disabled interrupts (on this vector) for us */ if (q_vector->rx.ring || q_vector->tx.ring) napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } /** * ixgbe_poll - NAPI Rx polling callback * @napi: structure for representing this polling device * @budget: how many packets driver is allowed to clean * * This function is used for legacy and MSI, NAPI mode **/ int ixgbe_poll(struct napi_struct *napi, int budget) { struct ixgbe_q_vector *q_vector = container_of(napi, struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_ring *ring; int per_ring_budget, work_done = 0; bool clean_complete = true; #ifdef CONFIG_IXGBE_DCA if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ixgbe_update_dca(q_vector); #endif ixgbe_for_each_ring(ring, q_vector->tx) { bool wd = ring->xsk_pool ? ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) : ixgbe_clean_tx_irq(q_vector, ring, budget); if (!wd) clean_complete = false; } /* Exit if we are called by netpoll */ if (budget <= 0) return budget; /* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling */ if (q_vector->rx.count > 1) per_ring_budget = max(budget/q_vector->rx.count, 1); else per_ring_budget = budget; ixgbe_for_each_ring(ring, q_vector->rx) { int cleaned = ring->xsk_pool ? 
ixgbe_clean_rx_irq_zc(q_vector, ring, per_ring_budget) : ixgbe_clean_rx_irq(q_vector, ring, per_ring_budget); work_done += cleaned; if (cleaned >= per_ring_budget) clean_complete = false; } /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; /* all work done, exit the polling mode */ if (likely(napi_complete_done(napi, work_done))) { if (adapter->rx_itr_setting & 1) ixgbe_set_itr(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx)); } return min(work_done, budget - 1); } /** * ixgbe_request_msix_irqs - Initialize MSI-X interrupts * @adapter: board private structure * * ixgbe_request_msix_irqs allocates MSI-X vectors and requests * interrupts from the kernel. **/ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; unsigned int ri = 0, ti = 0; int vector, err; for (vector = 0; vector < adapter->num_q_vectors; vector++) { struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; struct msix_entry *entry = &adapter->msix_entries[vector]; if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), "%s-TxRx-%u", netdev->name, ri++); ti++; } else if (q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), "%s-rx-%u", netdev->name, ri++); } else if (q_vector->tx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), "%s-tx-%u", netdev->name, ti++); } else { /* skip this unused q_vector */ continue; } err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0, q_vector->name, q_vector); if (err) { e_err(probe, "request_irq failed for MSIX interrupt " "Error: %d\n", err); goto free_queue_irqs; } /* If Flow Director is enabled, set interrupt affinity */ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { /* assign the mask for this irq */ irq_update_affinity_hint(entry->vector, &q_vector->affinity_mask); } } err = request_irq(adapter->msix_entries[vector].vector, ixgbe_msix_other, 0, netdev->name, adapter); if (err) { e_err(probe, "request_irq for msix_other failed: %d\n", err); goto free_queue_irqs; } return 0; free_queue_irqs: while (vector) { vector--; irq_update_affinity_hint(adapter->msix_entries[vector].vector, NULL); free_irq(adapter->msix_entries[vector].vector, adapter->q_vector[vector]); } adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; pci_disable_msix(adapter->pdev); kfree(adapter->msix_entries); adapter->msix_entries = NULL; return err; } /** * ixgbe_intr - legacy mode Interrupt Handler * @irq: interrupt number * @data: pointer to a network interface device structure **/ static irqreturn_t ixgbe_intr(int irq, void *data) { struct ixgbe_adapter *adapter = data; struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; u32 eicr; /* * Workaround for silicon errata #26 on 82598. Mask the interrupt * before the read of EICR. */ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read * therefore no explicit interrupt disable is necessary */ eicr = IXGBE_READ_REG(hw, IXGBE_EICR); if (!eicr) { /* * shared interrupt alert! * make sure interrupts are enabled because the read will * have disabled interrupts due to EIAM * finish the workaround of silicon errata on 82598. Unmask * the interrupt that we masked before the EICR read. 
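* (that unmask is the ixgbe_irq_enable(adapter, true, true) call just below;
* returning IRQ_NONE afterwards tells the core the interrupt belongs to
* another device sharing the line)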
*/ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ixgbe_irq_enable(adapter, true, true); return IRQ_NONE; /* Not our interrupt */ } if (eicr & IXGBE_EICR_LSC) ixgbe_check_lsc(adapter); switch (hw->mac.type) { case ixgbe_mac_82599EB: ixgbe_check_sfp_event(adapter, eicr); fallthrough; case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); ixgbe_service_event_schedule(adapter); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); } ixgbe_check_overtemp_event(adapter, eicr); break; default: break; } ixgbe_check_fan_failure(adapter, eicr); if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) ixgbe_ptp_check_pps_event(adapter); /* would disable interrupts here but EIAM disabled it */ napi_schedule_irqoff(&q_vector->napi); /* * re-enable link(maybe) and non-queue interrupts, no flush. * ixgbe_poll will re-enable the queue interrupts */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ixgbe_irq_enable(adapter, false, false); return IRQ_HANDLED; } /** * ixgbe_request_irq - initialize interrupts * @adapter: board private structure * * Attempts to configure interrupts using the best available * capabilities of the hardware and kernel. **/ static int ixgbe_request_irq(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err; if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) err = ixgbe_request_msix_irqs(adapter); else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, netdev->name, adapter); else err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, netdev->name, adapter); if (err) e_err(probe, "request_irq failed, Error %d\n", err); return err; } static void ixgbe_free_irq(struct ixgbe_adapter *adapter) { int vector; if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { free_irq(adapter->pdev->irq, adapter); return; } if (!adapter->msix_entries) return; for (vector = 0; vector < adapter->num_q_vectors; vector++) { struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; struct msix_entry *entry = &adapter->msix_entries[vector]; /* free only the irqs that were actually requested */ if (!q_vector->rx.ring && !q_vector->tx.ring) continue; /* clear the affinity_mask in the IRQ descriptor */ irq_update_affinity_hint(entry->vector, NULL); free_irq(entry->vector, q_vector); } free_irq(adapter->msix_entries[vector].vector, adapter); } /** * ixgbe_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure **/ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) { switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); break; default: break; } IXGBE_WRITE_FLUSH(&adapter->hw); if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { int vector; for (vector = 0; vector < adapter->num_q_vectors; vector++) synchronize_irq(adapter->msix_entries[vector].vector); synchronize_irq(adapter->msix_entries[vector++].vector); } else { synchronize_irq(adapter->pdev->irq); } } /** * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) 
and MSI interrupts * @adapter: board private structure * **/ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) { struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; ixgbe_write_eitr(q_vector); ixgbe_set_ivar(adapter, 0, 0, 0); ixgbe_set_ivar(adapter, 1, 0, 0); e_info(hw, "Legacy interrupt IVAR setup done\n"); } /** * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset * @adapter: board private structure * @ring: structure containing ring specific data * * Configure the Tx descriptor ring after a reset. **/ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; u64 tdba = ring->dma; int wait_loop = 10; u32 txdctl = IXGBE_TXDCTL_ENABLE; u8 reg_idx = ring->reg_idx; ring->xsk_pool = NULL; if (ring_is_xdp(ring)) ring->xsk_pool = ixgbe_xsk_pool(adapter, ring); /* disable queue to avoid issues while updating state */ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0); IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), (tdba & DMA_BIT_MASK(32))); IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), ring->count * sizeof(union ixgbe_adv_tx_desc)); IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx); /* * set WTHRESH to encourage burst writeback, it should not be set * higher than 1 when: * - ITR is 0 as it could cause false TX hangs * - ITR is set to > 100k int/sec and BQL is enabled * * In order to avoid issues WTHRESH + PTHRESH should always be equal * to or less than the number of on chip descriptors, which is * currently 40. */ if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) txdctl |= 1u << 16; /* WTHRESH = 1 */ else txdctl |= 8u << 16; /* WTHRESH = 8 */ /* * Setting PTHRESH to 32 both improves performance * and avoids a TX hang with DFP enabled */ txdctl |= (1u << 8) | /* HTHRESH = 1 */ 32; /* PTHRESH = 32 */ /* reinitialize flowdirector state */ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { ring->atr_sample_rate = adapter->atr_sample_rate; ring->atr_count = 0; set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); } else { ring->atr_sample_rate = 0; } /* initialize XPS */ if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) { struct ixgbe_q_vector *q_vector = ring->q_vector; if (q_vector) netif_set_xps_queue(ring->netdev, &q_vector->affinity_mask, ring->queue_index); } clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); /* reinitialize tx_buffer_info */ memset(ring->tx_buffer_info, 0, sizeof(struct ixgbe_tx_buffer) * ring->count); /* enable queue */ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ if (hw->mac.type == ixgbe_mac_82598EB && !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) return; /* poll to verify queue is enabled */ do { usleep_range(1000, 2000); txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx); } static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 rttdcs, mtqc; u8 tcs = adapter->hw_tcs; if (hw->mac.type == ixgbe_mac_82598EB) return; /* disable the arbiter while setting MTQC */ rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); rttdcs |= IXGBE_RTTDCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); /* set transmit pool layout */ if (adapter->flags & 
IXGBE_FLAG_SRIOV_ENABLED) { mtqc = IXGBE_MTQC_VT_ENA; if (tcs > 4) mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; else if (tcs > 1) mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; else if (adapter->ring_feature[RING_F_VMDQ].mask == IXGBE_82599_VMDQ_4Q_MASK) mtqc |= IXGBE_MTQC_32VF; else mtqc |= IXGBE_MTQC_64VF; } else { if (tcs > 4) { mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; } else if (tcs > 1) { mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; } else { u8 max_txq = adapter->num_tx_queues + adapter->num_xdp_queues; if (max_txq > 63) mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; else mtqc = IXGBE_MTQC_64Q_1PB; } } IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); /* Enable Security TX Buffer IFG for multiple pb */ if (tcs) { u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); sectx |= IXGBE_SECTX_DCB; IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx); } /* re-enable the arbiter */ rttdcs &= ~IXGBE_RTTDCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); } /** * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset * @adapter: board private structure * * Configure the Tx unit of the MAC after a reset. **/ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 dmatxctl; u32 i; ixgbe_setup_mtqc(adapter); if (hw->mac.type != ixgbe_mac_82598EB) { /* DMATXCTL.EN must be before Tx queues are enabled */ dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); dmatxctl |= IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); } /* Setup the HW Tx Head and Tail descriptor pointers */ for (i = 0; i < adapter->num_tx_queues; i++) ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); for (i = 0; i < adapter->num_xdp_queues; i++) ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]); } static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; u8 reg_idx = ring->reg_idx; u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); srrctl |= IXGBE_SRRCTL_DROP_EN; IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); } static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; u8 reg_idx = ring->reg_idx; u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); srrctl &= ~IXGBE_SRRCTL_DROP_EN; IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); } #ifdef CONFIG_IXGBE_DCB void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) #else static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) #endif { int i; bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; if (adapter->ixgbe_ieee_pfc) pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); /* * We should set the drop enable bit if: * SR-IOV is enabled * or * Number of Rx queues > 1 and flow control is disabled * * This allows us to avoid head of line blocking for security * and performance reasons. 
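* (SRRCTL.DROP_EN lets a full ring drop frames instead of backing up the
* shared Rx packet buffer; outside of SR-IOV it is left clear whenever Tx
* flow control or PFC is active so pause frames handle congestion instead)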
*/ if (adapter->num_vfs || (adapter->num_rx_queues > 1 && !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) { for (i = 0; i < adapter->num_rx_queues; i++) ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); } else { for (i = 0; i < adapter->num_rx_queues; i++) ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); } } #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring) { struct ixgbe_hw *hw = &adapter->hw; u32 srrctl; u8 reg_idx = rx_ring->reg_idx; if (hw->mac.type == ixgbe_mac_82598EB) { u16 mask = adapter->ring_feature[RING_F_RSS].mask; /* * if VMDq is not active we must program one srrctl register * per RSS queue since we have enabled RDRXCTL.MVMEN */ reg_idx &= mask; } /* configure header buffer length, needed for RSC */ srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; /* configure the packet buffer length */ if (rx_ring->xsk_pool) { u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool); /* If the MAC support setting RXDCTL.RLPML, the * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and * RXDCTL.RLPML is set to the actual UMEM buffer * size. If not, then we are stuck with a 1k buffer * size resolution. In this case frames larger than * the UMEM buffer size viewed in a 1k resolution will * be dropped. */ if (hw->mac.type != ixgbe_mac_82599EB) srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; else srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) { srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; } else { srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; } /* configure descriptor type */ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); } /** * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries * @adapter: device handle * * - 82598/82599/X540: 128 * - X550(non-SRIOV mode): 512 * - X550(SRIOV mode): 64 */ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) { if (adapter->hw.mac.type < ixgbe_mac_X550) return 128; else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) return 64; else return 512; } /** * ixgbe_store_key - Write the RSS key to HW * @adapter: device handle * * Write the RSS key stored in adapter.rss_key to HW. */ void ixgbe_store_key(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int i; for (i = 0; i < 10; i++) IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); } /** * ixgbe_init_rss_key - Initialize adapter RSS key * @adapter: device handle * * Allocates and initializes the RSS key if it is not allocated. **/ static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter) { u32 *rss_key; if (!adapter->rss_key) { rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL); if (unlikely(!rss_key)) return -ENOMEM; netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE); adapter->rss_key = rss_key; } return 0; } /** * ixgbe_store_reta - Write the RETA table to HW * @adapter: device handle * * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. */ void ixgbe_store_reta(struct ixgbe_adapter *adapter) { u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); struct ixgbe_hw *hw = &adapter->hw; u32 reta = 0; u32 indices_multi; u8 *indir_tbl = adapter->rss_indir_tbl; /* Fill out the redirection table as follows: * - 82598: 8 bit wide entries containing pair of 4 bit RSS * indices. 
* - 82599/X540: 8 bit wide entries containing 4 bit RSS index * - X550: 8 bit wide entries containing 6 bit RSS index */ if (adapter->hw.mac.type == ixgbe_mac_82598EB) indices_multi = 0x11; else indices_multi = 0x1; /* Write redirection table to HW */ for (i = 0; i < reta_entries; i++) { reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8; if ((i & 3) == 3) { if (i < 128) IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); else IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta); reta = 0; } } } /** * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode) * @adapter: device handle * * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. */ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) { u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); struct ixgbe_hw *hw = &adapter->hw; u32 vfreta = 0; /* Write redirection table to HW */ for (i = 0; i < reta_entries; i++) { u16 pool = adapter->num_rx_pools; vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; if ((i & 3) != 3) continue; while (pool--) IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)), vfreta); vfreta = 0; } } static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) { u32 i, j; u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; /* Program table for at least 4 queues w/ SR-IOV so that VFs can * make full use of any rings they may have. We will use the * PSRTYPE register to control how many rings we use within the PF. */ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4)) rss_i = 4; /* Fill out hash function seeds */ ixgbe_store_key(adapter); /* Fill out redirection table */ memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); for (i = 0, j = 0; i < reta_entries; i++, j++) { if (j == rss_i) j = 0; adapter->rss_indir_tbl[i] = j; } ixgbe_store_reta(adapter); } static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; int i, j; /* Fill out hash function seeds */ for (i = 0; i < 10; i++) { u16 pool = adapter->num_rx_pools; while (pool--) IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, VMDQ_P(pool)), *(adapter->rss_key + i)); } /* Fill out the redirection table */ for (i = 0, j = 0; i < 64; i++, j++) { if (j == rss_i) j = 0; adapter->rss_indir_tbl[i] = j; } ixgbe_store_vfreta(adapter); } static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 mrqc = 0, rss_field = 0, vfmrqc = 0; u32 rxcsum; /* Disable indicating checksum in descriptor, enables RSS hash */ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); rxcsum |= IXGBE_RXCSUM_PCSD; IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); if (adapter->hw.mac.type == ixgbe_mac_82598EB) { if (adapter->ring_feature[RING_F_RSS].mask) mrqc = IXGBE_MRQC_RSSEN; } else { u8 tcs = adapter->hw_tcs; if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { if (tcs > 4) mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */ else if (tcs > 1) mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */ else if (adapter->ring_feature[RING_F_VMDQ].mask == IXGBE_82599_VMDQ_4Q_MASK) mrqc = IXGBE_MRQC_VMDQRSS32EN; else mrqc = IXGBE_MRQC_VMDQRSS64EN; /* Enable L3/L4 for Tx Switched packets only for X550, * older devices do not support this feature */ if (hw->mac.type >= ixgbe_mac_X550) mrqc |= IXGBE_MRQC_L3L4TXSWEN; } else { if (tcs > 4) mrqc = IXGBE_MRQC_RTRSS8TCEN; else if (tcs > 1) mrqc = IXGBE_MRQC_RTRSS4TCEN; else mrqc = IXGBE_MRQC_RSSEN; } } /* Perform hash on these 
packet types */ rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP | IXGBE_MRQC_RSS_FIELD_IPV6 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; if ((hw->mac.type >= ixgbe_mac_X550) && (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { u16 pool = adapter->num_rx_pools; /* Enable VF RSS mode */ mrqc |= IXGBE_MRQC_MULTIPLE_RSS; IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); /* Setup RSS through the VF registers */ ixgbe_setup_vfreta(adapter); vfmrqc = IXGBE_MRQC_RSSEN; vfmrqc |= rss_field; while (pool--) IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(VMDQ_P(pool)), vfmrqc); } else { ixgbe_setup_reta(adapter); mrqc |= rss_field; IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); } } /** * ixgbe_configure_rscctl - enable RSC for the indicated ring * @adapter: address of board private structure * @ring: structure containing ring specific data **/ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; u32 rscctrl; u8 reg_idx = ring->reg_idx; if (!ring_is_rsc_enabled(ring)) return; rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); rscctrl |= IXGBE_RSCCTL_RSCEN; /* * we must limit the number of descriptors so that the * total size of max desc * buf_len is not greater * than 65536 */ rscctrl |= IXGBE_RSCCTL_MAXDESC_16; IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); } #define IXGBE_MAX_RX_DESC_POLL 10 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; int wait_loop = IXGBE_MAX_RX_DESC_POLL; u32 rxdctl; u8 reg_idx = ring->reg_idx; if (ixgbe_removed(hw->hw_addr)) return; /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ if (hw->mac.type == ixgbe_mac_82598EB && !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) return; do { usleep_range(1000, 2000); rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); if (!wait_loop) { e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " "the polling period\n", reg_idx); } } void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; union ixgbe_adv_rx_desc *rx_desc; u64 rdba = ring->dma; u32 rxdctl; u8 reg_idx = ring->reg_idx; xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); ring->xsk_pool = ixgbe_xsk_pool(adapter, ring); if (ring->xsk_pool) { WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL)); xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); } else { WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL)); } /* disable queue to avoid use of these values while updating state */ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); rxdctl &= ~IXGBE_RXDCTL_ENABLE; /* write value back with RXDCTL.ENABLE bit cleared */ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), ring->count * sizeof(union ixgbe_adv_rx_desc)); /* Force flushing of IXGBE_RDLEN to prevent MDD */ IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); ixgbe_configure_srrctl(adapter, ring); 
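/* SRRCTL above advertises the receive buffer size (2K, 3K or the XSK frame
* length); the RXDCTL programming below may additionally cap the accepted
* frame length via RLPML on MACs that support it.
*/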
ixgbe_configure_rscctl(adapter, ring); if (hw->mac.type == ixgbe_mac_82598EB) { /* * enable cache line friendly hardware writes: * PTHRESH=32 descriptors (half the internal cache), * this also removes ugly rx_no_buffer_count increment * HTHRESH=4 descriptors (to minimize latency on fetch) * WTHRESH=8 burst writeback up to two cache lines */ rxdctl &= ~0x3FFFFF; rxdctl |= 0x080420; #if (PAGE_SIZE < 8192) /* RXDCTL.RLPML does not work on 82599 */ } else if (hw->mac.type != ixgbe_mac_82599EB) { rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | IXGBE_RXDCTL_RLPML_EN); /* Limit the maximum frame size so we don't overrun the skb. * This can happen in SRIOV mode when the MTU of the VF is * higher than the MTU of the PF. */ if (ring_uses_build_skb(ring) && !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB | IXGBE_RXDCTL_RLPML_EN; #endif } ring->rx_offset = ixgbe_rx_offset(ring); if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) { u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | IXGBE_RXDCTL_RLPML_EN); rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN; ring->rx_buf_len = xsk_buf_len; } /* initialize rx_buffer_info */ memset(ring->rx_buffer_info, 0, sizeof(struct ixgbe_rx_buffer) * ring->count); /* initialize Rx descriptor 0 */ rx_desc = IXGBE_RX_DESC(ring, 0); rx_desc->wb.upper.length = 0; /* enable receive descriptor ring */ rxdctl |= IXGBE_RXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); ixgbe_rx_desc_queue_enable(adapter, ring); if (ring->xsk_pool) ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring)); else ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); } static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int rss_i = adapter->ring_feature[RING_F_RSS].indices; u16 pool = adapter->num_rx_pools; /* PSRTYPE must be initialized in non 82598 adapters */ u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_L2HDR | IXGBE_PSRTYPE_IPV6HDR; if (hw->mac.type == ixgbe_mac_82598EB) return; if (rss_i > 3) psrtype |= 2u << 29; else if (rss_i > 1) psrtype |= 1u << 29; while (pool--) IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); } static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u16 pool = adapter->num_rx_pools; u32 reg_offset, vf_shift, vmolr; u32 gcr_ext, vmdctl; int i; if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return; vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT; vmdctl |= IXGBE_VT_CTL_REPLEN; IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); /* accept untagged packets until a vlan tag is * specifically set for the VMDQ queue/pool */ vmolr = IXGBE_VMOLR_AUPE; while (pool--) IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr); vf_shift = VMDQ_P(0) % 32; reg_offset = (VMDQ_P(0) >= 32) ? 
1 : 0; /* Enable only the PF's pool for Tx/Rx */ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift)); IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift)); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); if (adapter->bridge_mode == BRIDGE_MODE_VEB) IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); /* clear VLAN promisc flag so VFTA will be updated if necessary */ adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; /* * Set up VF register offsets for selected VT Mode, * i.e. 32 or 64 VFs for SR-IOV */ switch (adapter->ring_feature[RING_F_VMDQ].mask) { case IXGBE_82599_VMDQ_8Q_MASK: gcr_ext = IXGBE_GCR_EXT_VT_MODE_16; break; case IXGBE_82599_VMDQ_4Q_MASK: gcr_ext = IXGBE_GCR_EXT_VT_MODE_32; break; default: gcr_ext = IXGBE_GCR_EXT_VT_MODE_64; break; } IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); for (i = 0; i < adapter->num_vfs; i++) { /* configure spoof checking */ ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, adapter->vfinfo[i].spoofchk_enabled); /* Enable/Disable RSS query feature */ ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, adapter->vfinfo[i].rss_query_enabled); } } static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; struct ixgbe_ring *rx_ring; int i; u32 mhadd, hlreg0; #ifdef IXGBE_FCOE /* adjust max frame to be able to do baby jumbo for FCoE */ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; #endif /* IXGBE_FCOE */ /* adjust max frame to be at least the size of a standard frame */ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { mhadd &= ~IXGBE_MHADD_MFS_MASK; mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); } hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ hlreg0 |= IXGBE_HLREG0_JUMBOEN; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); /* * Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ for (i = 0; i < adapter->num_rx_queues; i++) { rx_ring = adapter->rx_ring[i]; clear_ring_rsc_enabled(rx_ring); clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) set_ring_rsc_enabled(rx_ring); if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) continue; set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); #if (PAGE_SIZE < 8192) if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); if (IXGBE_2K_TOO_SMALL_WITH_PADDING || (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); #endif } } static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); switch (hw->mac.type) { case ixgbe_mac_82598EB: /* * For VMDq support of different descriptor types or * buffer sizes through the use of multiple SRRCTL 
* registers, RDRXCTL.MVMEN must be set to 1 * * also, the manual doesn't mention it clearly but DCA hints * will only use queue 0's tags unless this bit is set. Side * effects of setting this bit are only that SRRCTL must be * fully programmed [0..15] */ rdrxctl |= IXGBE_RDRXCTL_MVMEN; break; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: if (adapter->num_vfs) rdrxctl |= IXGBE_RDRXCTL_PSP; fallthrough; case ixgbe_mac_82599EB: case ixgbe_mac_X540: /* Disable RSC for ACK packets */ IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; /* hardware requires some bits to be set by default */ rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; break; default: /* We should do nothing since we don't know this hardware */ return; } IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); } /** * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset * @adapter: board private structure * * Configure the Rx unit of the MAC after a reset. **/ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int i; u32 rxctrl, rfctl; /* disable receives while setting up the descriptors */ hw->mac.ops.disable_rx(hw); ixgbe_setup_psrtype(adapter); ixgbe_setup_rdrxctl(adapter); /* RSC Setup */ rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); rfctl &= ~IXGBE_RFCTL_RSC_DIS; if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) rfctl |= IXGBE_RFCTL_RSC_DIS; /* disable NFS filtering */ rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS); IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); /* Program registers for the distribution of queues */ ixgbe_setup_mrqc(adapter); /* set_rx_buffer_len must be called before ring initialization */ ixgbe_set_rx_buffer_len(adapter); /* * Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ for (i = 0; i < adapter->num_rx_queues; i++) ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); /* disable drop enable for 82598 parts */ if (hw->mac.type == ixgbe_mac_82598EB) rxctrl |= IXGBE_RXCTRL_DMBYPS; /* enable all receives */ rxctrl |= IXGBE_RXCTRL_RXEN; hw->mac.ops.enable_rx_dma(hw, rxctrl); } static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; /* add VID to filter table */ if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid); set_bit(vid, adapter->active_vlans); return 0; } static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) { u32 vlvf; int idx; /* short cut the special case */ if (vlan == 0) return 0; /* Search for the vlan id in the VLVF entries */ for (idx = IXGBE_VLVF_ENTRIES; --idx;) { vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx)); if ((vlvf & VLAN_VID_MASK) == vlan) break; } return idx; } void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) { struct ixgbe_hw *hw = &adapter->hw; u32 bits, word; int idx; idx = ixgbe_find_vlvf_entry(hw, vid); if (!idx) return; /* See if any other pools are set for this VLAN filter * entry other than the PF. */ word = idx * 2 + (VMDQ_P(0) / 32); bits = ~BIT(VMDQ_P(0) % 32); bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); /* Disable the filter so this falls into the default pool. 
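 *
 * Each VLVF entry owns a pair of 32-bit VLVFB pool-enable words, so
 * "word" above selects the half that carries the PF's pool bit and
 * "word ^ 1" below is its companion. If neither word has any bit set
 * besides the PF's, no pool references this VLAN any more and the
 * VLVF entry can be released.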
*/ if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) { if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0); IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0); } } static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; /* remove VID from filter table */ if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true); clear_bit(vid, adapter->active_vlans); return 0; } /** * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping * @adapter: driver data */ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 vlnctrl; int i, j; switch (hw->mac.type) { case ixgbe_mac_82598EB: vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); vlnctrl &= ~IXGBE_VLNCTRL_VME; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; if (!netif_is_ixgbe(ring->netdev)) continue; j = ring->reg_idx; vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); vlnctrl &= ~IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); } break; default: break; } } /** * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping * @adapter: driver data */ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 vlnctrl; int i, j; switch (hw->mac.type) { case ixgbe_mac_82598EB: vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); vlnctrl |= IXGBE_VLNCTRL_VME; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; if (!netif_is_ixgbe(ring->netdev)) continue; j = ring->reg_idx; vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); vlnctrl |= IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); } break; default: break; } } static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 vlnctrl, i; vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { /* For VMDq and SR-IOV we must leave VLAN filtering enabled */ vlnctrl |= IXGBE_VLNCTRL_VFE; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); } else { vlnctrl &= ~IXGBE_VLNCTRL_VFE; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); return; } /* Nothing to do for 82598 */ if (hw->mac.type == ixgbe_mac_82598EB) return; /* We are already in VLAN promisc, nothing to do */ if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) return; /* Set flag so we don't redo unnecessary work */ adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; /* Add PF to all active pools */ for (i = IXGBE_VLVF_ENTRIES; --i;) { u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); u32 vlvfb = IXGBE_READ_REG(hw, reg_offset); vlvfb |= BIT(VMDQ_P(0) % 32); IXGBE_WRITE_REG(hw, reg_offset, vlvfb); } /* Set all bits in the VLAN filter table array */ for (i = hw->mac.vft_size; i--;) IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U); } #define VFTA_BLOCK_SIZE 8 static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) { struct ixgbe_hw *hw = &adapter->hw; u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; u32 vid_start = vfta_offset * 32; u32 vid_end = vid_start 
			+ (VFTA_BLOCK_SIZE * 32);
	u32 i, vid, word, bits;

	for (i = IXGBE_VLVF_ENTRIES; --i;) {
		u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));

		/* pull VLAN ID from VLVF */
		vid = vlvf & VLAN_VID_MASK;

		/* only concern ourselves with a certain range */
		if (vid < vid_start || vid >= vid_end)
			continue;

		if (vlvf) {
			/* record VLAN ID in VFTA */
			vfta[(vid - vid_start) / 32] |= BIT(vid % 32);

			/* if PF is part of this then continue */
			if (test_bit(vid, adapter->active_vlans))
				continue;
		}

		/* remove PF from the pool */
		word = i * 2 + VMDQ_P(0) / 32;
		bits = ~BIT(VMDQ_P(0) % 32);
		bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
	}

	/* extract values from active_vlans and write back to VFTA */
	for (i = VFTA_BLOCK_SIZE; i--;) {
		vid = (vfta_offset + i) * 32;
		word = vid / BITS_PER_LONG;
		bits = vid % BITS_PER_LONG;

		vfta[i] |= adapter->active_vlans[word] >> bits;

		IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
	}
}

static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl, i;

	/* Set VLAN filtering to enabled */
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl |= IXGBE_VLNCTRL_VFE;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
	    hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* We are not in VLAN promisc, nothing to do */
	if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
		return;

	/* Set flag so we don't redo unnecessary work */
	adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;

	for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
		ixgbe_scrub_vfta(adapter, i);
}

static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	u16 vid = 1;

	ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);

	for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

/**
 * ixgbe_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
* Returns: -ENOMEM on failure * 0 on no addresses written * X on writing X addresses to MTA **/ static int ixgbe_write_mc_addr_list(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; if (!netif_running(netdev)) return 0; if (hw->mac.ops.update_mc_addr_list) hw->mac.ops.update_mc_addr_list(hw, netdev); else return -ENOMEM; #ifdef CONFIG_PCI_IOV ixgbe_restore_vf_multicasts(adapter); #endif return netdev_mc_count(netdev); } #ifdef CONFIG_PCI_IOV void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter) { struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i; for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; if (mac_table->state & IXGBE_MAC_STATE_IN_USE) hw->mac.ops.set_rar(hw, i, mac_table->addr, mac_table->pool, IXGBE_RAH_AV); else hw->mac.ops.clear_rar(hw, i); } } #endif static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) { struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i; for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED)) continue; mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; if (mac_table->state & IXGBE_MAC_STATE_IN_USE) hw->mac.ops.set_rar(hw, i, mac_table->addr, mac_table->pool, IXGBE_RAH_AV); else hw->mac.ops.clear_rar(hw, i); } } static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) { struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i; for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { mac_table->state |= IXGBE_MAC_STATE_MODIFIED; mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; } ixgbe_sync_mac_table(adapter); } static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool) { struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i, count = 0; for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { /* do not count default RAR as available */ if (mac_table->state & IXGBE_MAC_STATE_DEFAULT) continue; /* only count unused and addresses that belong to us */ if (mac_table->state & IXGBE_MAC_STATE_IN_USE) { if (mac_table->pool != pool) continue; } count++; } return count; } /* this function destroys the first RAR entry */ static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter) { struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN); mac_table->pool = VMDQ_P(0); mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE; hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool, IXGBE_RAH_AV); } int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, const u8 *addr, u16 pool) { struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i; if (is_zero_ether_addr(addr)) return -EINVAL; for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { if (mac_table->state & IXGBE_MAC_STATE_IN_USE) continue; ether_addr_copy(mac_table->addr, addr); mac_table->pool = pool; mac_table->state |= IXGBE_MAC_STATE_MODIFIED | IXGBE_MAC_STATE_IN_USE; ixgbe_sync_mac_table(adapter); return i; } return -ENOMEM; } int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, const u8 *addr, u16 pool) { struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; struct ixgbe_hw *hw = &adapter->hw; int i; if (is_zero_ether_addr(addr)) return 
-EINVAL; /* search table for addr, if found clear IN_USE flag and sync */ for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { /* we can only delete an entry if it is in use */ if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE)) continue; /* we only care about entries that belong to the given pool */ if (mac_table->pool != pool) continue; /* we only care about a specific MAC address */ if (!ether_addr_equal(addr, mac_table->addr)) continue; mac_table->state |= IXGBE_MAC_STATE_MODIFIED; mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; ixgbe_sync_mac_table(adapter); return 0; } return -ENOMEM; } static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int ret; ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0)); return min_t(int, ret, 0); } static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) { struct ixgbe_adapter *adapter = netdev_priv(netdev); ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0)); return 0; } /** * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure * * The set_rx_method entry point is called whenever the unicast/multicast * address list or the network interface flags are updated. This routine is * responsible for configuring the hardware for proper unicast, multicast and * promiscuous mode. **/ void ixgbe_set_rx_mode(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; netdev_features_t features = netdev->features; int count; /* Check for Promiscuous and All Multicast modes */ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); /* set all bits that we expect to always be set */ fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ fctrl |= IXGBE_FCTRL_BAM; fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ fctrl |= IXGBE_FCTRL_PMCF; /* clear the bits we are changing the status of */ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); if (netdev->flags & IFF_PROMISC) { hw->addr_ctrl.user_set_promisc = true; fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); vmolr |= IXGBE_VMOLR_MPE; features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; } else { if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; vmolr |= IXGBE_VMOLR_MPE; } hw->addr_ctrl.user_set_promisc = false; } /* * Write addresses to available RAR registers, if there is not * sufficient space to store all the addresses then enable * unicast promiscuous mode */ if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) { fctrl |= IXGBE_FCTRL_UPE; vmolr |= IXGBE_VMOLR_ROPE; } /* Write addresses to the MTA, if the attempt fails * then we should just turn on promiscuous mode so * that we can at least receive multicast traffic */ count = ixgbe_write_mc_addr_list(netdev); if (count < 0) { fctrl |= IXGBE_FCTRL_MPE; vmolr |= IXGBE_VMOLR_MPE; } else if (count) { vmolr |= IXGBE_VMOLR_ROMPE; } if (hw->mac.type != ixgbe_mac_82598EB) { vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) & ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE); IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr); } /* This is useful for sniffing bad packets. 
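 * For example, when the user enables NETIF_F_RXALL (typically exposed
 * as the "rx-all" feature of ethtool -K), the block below sets
 * FCTRL.SBP so frames failing CRC are still stored, keeps BAM/PMCF for
 * broadcast and MAC control frames, and clears DPF so pause frames are
 * no longer discarded.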
*/ if (features & NETIF_F_RXALL) { /* UPE and MPE will be handled by normal PROMISC logic * in e1000e_set_rx_mode */ fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */ IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */ IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */ fctrl &= ~(IXGBE_FCTRL_DPF); /* NOTE: VLAN filtering is disabled by setting PROMISC */ } IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); if (features & NETIF_F_HW_VLAN_CTAG_RX) ixgbe_vlan_strip_enable(adapter); else ixgbe_vlan_strip_disable(adapter); if (features & NETIF_F_HW_VLAN_CTAG_FILTER) ixgbe_vlan_promisc_disable(adapter); else ixgbe_vlan_promisc_enable(adapter); } static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) { int q_idx; for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) napi_enable(&adapter->q_vector[q_idx]->napi); } static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) { int q_idx; for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) napi_disable(&adapter->q_vector[q_idx]->napi); } static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; struct udp_tunnel_info ti; udp_tunnel_nic_get_port(dev, table, 0, &ti); if (ti.type == UDP_TUNNEL_TYPE_VXLAN) adapter->vxlan_port = ti.port; else adapter->geneve_port = ti.port; IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(adapter->vxlan_port) | ntohs(adapter->geneve_port) << IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT); return 0; } static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550 = { .sync_table = ixgbe_udp_tunnel_sync, .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY, .tables = { { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, }, }; static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550em_a = { .sync_table = ixgbe_udp_tunnel_sync, .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY, .tables = { { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, }, }; #ifdef CONFIG_IXGBE_DCB /** * ixgbe_configure_dcb - Configure DCB hardware * @adapter: ixgbe adapter struct * * This is called by the driver on open to configure the DCB hardware. * This is also called by the gennetlink interface when reconfiguring * the DCB state. 
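 *
 * Note: the body below takes the CEE path when DCB_CAP_DCBX_VER_CEE is
 * set in dcbx_cap, recalculating TC credits from dcb_cfg, and otherwise
 * programs ETS/PFC from the cached ixgbe_ieee_ets/ixgbe_ieee_pfc
 * structures (IEEE mode).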
 */
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
		if (hw->mac.type == ixgbe_mac_82598EB)
			netif_set_tso_max_size(adapter->netdev, 65536);
		return;
	}

	if (hw->mac.type == ixgbe_mac_82598EB)
		netif_set_tso_max_size(adapter->netdev, 32768);

#ifdef IXGBE_FCOE
	if (adapter->netdev->features & NETIF_F_FCOE_MTU)
		max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif

	/* reconfigure the hardware */
	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
		ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
					       DCB_TX_CONFIG);
		ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
					       DCB_RX_CONFIG);
		ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
	} else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
		ixgbe_dcb_hw_ets(&adapter->hw,
				 adapter->ixgbe_ieee_ets,
				 max_frame);
		ixgbe_dcb_hw_pfc_config(&adapter->hw,
					adapter->ixgbe_ieee_pfc->pfc_en,
					adapter->ixgbe_ieee_ets->prio_tc);
	}

	/* Enable RSS Hash per TC */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 msb = 0;
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;

		while (rss_i) {
			msb++;
			rss_i >>= 1;
		}

		/* write msb to all 8 TCs in one write */
		IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
	}
}
#endif

/* Additional bittime to account for IXGBE framing */
#define IXGBE_ETH_FRAMING 20

/**
 * ixgbe_hpbthresh - calculate high water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */
static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *dev = adapter->netdev;
	int link, tc, kb, marker;
	u32 dv_id, rx_pba;

	/* Calculate max LAN frame size */
	tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;

#ifdef IXGBE_FCOE
	/* FCoE traffic class uses FCOE jumbo frames */
	if ((dev->features & NETIF_F_FCOE_MTU) &&
	    (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
	    (pb == ixgbe_fcoe_get_tc(adapter)))
		tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif

	/* Calculate delay value for device */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		dv_id = IXGBE_DV_X540(link, tc);
		break;
	default:
		dv_id = IXGBE_DV(link, tc);
		break;
	}

	/* Loopback switch introduces additional latency */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		dv_id += IXGBE_B2BT(tc);

	/* Delay value is calculated in bit times, convert to KB */
	kb = IXGBE_BT2KB(dv_id);
	rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;

	marker = rx_pba - kb;

	/* It is possible that the packet buffer is not large enough
	 * to provide required headroom. In this case throw an error
	 * to user and do the best we can.
	 */
	if (marker < 0) {
		e_warn(drv, "Packet Buffer(%i) can not provide enough "
			    "headroom to support flow control. "
"Decrease MTU or number of traffic classes\n", pb); marker = tc + 1; } return marker; } /** * ixgbe_lpbthresh - calculate low water mark for flow control * * @adapter: board private structure to calculate for * @pb: packet buffer to calculate */ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) { struct ixgbe_hw *hw = &adapter->hw; struct net_device *dev = adapter->netdev; int tc; u32 dv_id; /* Calculate max LAN frame size */ tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; #ifdef IXGBE_FCOE /* FCoE traffic class uses FCOE jumbo frames */ if ((dev->features & NETIF_F_FCOE_MTU) && (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; #endif /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: dv_id = IXGBE_LOW_DV_X540(tc); break; default: dv_id = IXGBE_LOW_DV(tc); break; } /* Delay value is calculated in bit times convert to KB */ return IXGBE_BT2KB(dv_id); } /* * ixgbe_pbthresh_setup - calculate and setup high low water marks */ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int num_tc = adapter->hw_tcs; int i; if (!num_tc) num_tc = 1; for (i = 0; i < num_tc; i++) { hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i); /* Low water marks must not be larger than high water marks */ if (hw->fc.low_water[i] > hw->fc.high_water[i]) hw->fc.low_water[i] = 0; } for (; i < MAX_TRAFFIC_CLASS; i++) hw->fc.high_water[i] = 0; } static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int hdrm; u8 tc = adapter->hw_tcs; if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) hdrm = 32 << adapter->fdir_pballoc; else hdrm = 0; hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); ixgbe_pbthresh_setup(adapter); } static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; struct ixgbe_fdir_filter *filter; u8 queue; spin_lock(&adapter->fdir_perfect_lock); if (!hlist_empty(&adapter->fdir_filter_list)) ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { if (filter->action == IXGBE_FDIR_DROP_QUEUE) { queue = IXGBE_FDIR_DROP_QUEUE; } else { u32 ring = ethtool_get_flow_spec_ring(filter->action); u8 vf = ethtool_get_flow_spec_ring_vf(filter->action); if (!vf && (ring >= adapter->num_rx_queues)) { e_err(drv, "FDIR restore failed without VF, ring: %u\n", ring); continue; } else if (vf && ((vf > adapter->num_vfs) || ring >= adapter->num_rx_queues_per_pool)) { e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n", vf, ring); continue; } /* Map the ring onto the absolute queue index */ if (!vf) queue = adapter->rx_ring[ring]->reg_idx; else queue = ((vf - 1) * adapter->num_rx_queues_per_pool) + ring; } ixgbe_fdir_write_perfect_filter_82599(hw, &filter->filter, filter->sw_idx, queue); } spin_unlock(&adapter->fdir_perfect_lock); } /** * ixgbe_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from **/ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) { u16 i = rx_ring->next_to_clean; struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; if (rx_ring->xsk_pool) { ixgbe_xsk_clean_rx_ring(rx_ring); goto skip_free; } /* Free all the Rx ring 
sk_buffs */ while (i != rx_ring->next_to_alloc) { if (rx_buffer->skb) { struct sk_buff *skb = rx_buffer->skb; if (IXGBE_CB(skb)->page_released) dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR); dev_kfree_skb(skb); } /* Invalidate cache lines that may have been written to by * device so that we avoid corrupting memory. */ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, ixgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE); /* free resources associated with mapping */ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR); __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); i++; rx_buffer++; if (i == rx_ring->count) { i = 0; rx_buffer = rx_ring->rx_buffer_info; } } skip_free: rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; } static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, struct ixgbe_fwd_adapter *accel) { u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; int num_tc = netdev_get_num_tc(adapter->netdev); struct net_device *vdev = accel->netdev; int i, baseq, err; baseq = accel->pool * adapter->num_rx_queues_per_pool; netdev_dbg(vdev, "pool %i:%i queues %i:%i\n", accel->pool, adapter->num_rx_pools, baseq, baseq + adapter->num_rx_queues_per_pool); accel->rx_base_queue = baseq; accel->tx_base_queue = baseq; /* record configuration for macvlan interface in vdev */ for (i = 0; i < num_tc; i++) netdev_bind_sb_channel_queue(adapter->netdev, vdev, i, rss_i, baseq + (rss_i * i)); for (i = 0; i < adapter->num_rx_queues_per_pool; i++) adapter->rx_ring[baseq + i]->netdev = vdev; /* Guarantee all rings are updated before we update the * MAC address filter. */ wmb(); /* ixgbe_add_mac_filter will return an index if it succeeds, so we * need to only treat it as an error value if it is negative. 
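 * A non-negative return is the RAR slot the macvlan address landed in,
 * so "err >= 0" below keeps the L2 forwarding offload enabled; only a
 * negative errno (for instance -ENOMEM when the RAR table is full)
 * takes the teardown path that releases the offload again.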
*/ err = ixgbe_add_mac_filter(adapter, vdev->dev_addr, VMDQ_P(accel->pool)); if (err >= 0) return 0; /* if we cannot add the MAC rule then disable the offload */ macvlan_release_l2fw_offload(vdev); for (i = 0; i < adapter->num_rx_queues_per_pool; i++) adapter->rx_ring[baseq + i]->netdev = NULL; netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n"); /* unbind the queues and drop the subordinate channel config */ netdev_unbind_sb_channel(adapter->netdev, vdev); netdev_set_sb_channel(vdev, 0); clear_bit(accel->pool, adapter->fwd_bitmask); kfree(accel); return err; } static int ixgbe_macvlan_up(struct net_device *vdev, struct netdev_nested_priv *priv) { struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data; struct ixgbe_fwd_adapter *accel; if (!netif_is_macvlan(vdev)) return 0; accel = macvlan_accel_priv(vdev); if (!accel) return 0; ixgbe_fwd_ring_up(adapter, accel); return 0; } static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) { struct netdev_nested_priv priv = { .data = (void *)adapter, }; netdev_walk_all_upper_dev_rcu(adapter->netdev, ixgbe_macvlan_up, &priv); } static void ixgbe_configure(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; ixgbe_configure_pb(adapter); #ifdef CONFIG_IXGBE_DCB ixgbe_configure_dcb(adapter); #endif /* * We must restore virtualization before VLANs or else * the VLVF registers will not be populated */ ixgbe_configure_virtualization(adapter); ixgbe_set_rx_mode(adapter->netdev); ixgbe_restore_vlan(adapter); ixgbe_ipsec_restore(adapter); switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: hw->mac.ops.disable_rx_buff(hw); break; default: break; } if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { ixgbe_init_fdir_signature_82599(&adapter->hw, adapter->fdir_pballoc); } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { ixgbe_init_fdir_perfect_82599(&adapter->hw, adapter->fdir_pballoc); ixgbe_fdir_filter_restore(adapter); } switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: hw->mac.ops.enable_rx_buff(hw); break; default: break; } #ifdef CONFIG_IXGBE_DCA /* configure DCA */ if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) ixgbe_setup_dca(adapter); #endif /* CONFIG_IXGBE_DCA */ #ifdef IXGBE_FCOE /* configure FCoE L2 filters, redirection table, and Rx control */ ixgbe_configure_fcoe(adapter); #endif /* IXGBE_FCOE */ ixgbe_configure_tx(adapter); ixgbe_configure_rx(adapter); ixgbe_configure_dfwd(adapter); } /** * ixgbe_sfp_link_config - set up SFP+ link * @adapter: pointer to private adapter struct **/ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) { /* * We are assuming the worst case scenario here, and that * is that an SFP was inserted/removed after the reset * but before SFP detection was enabled. 
As such the best * solution is to just start searching as soon as we start */ if (adapter->hw.mac.type == ixgbe_mac_82598EB) adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; adapter->sfp_poll_time = 0; } /** * ixgbe_non_sfp_link_config - set up non-SFP+ link * @hw: pointer to private hardware struct * * Returns 0 on success, negative on failure **/ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) { u32 speed; bool autoneg, link_up = false; int ret = IXGBE_ERR_LINK_SETUP; if (hw->mac.ops.check_link) ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); if (ret) return ret; speed = hw->phy.autoneg_advertised; if (!speed && hw->mac.ops.get_link_capabilities) { ret = hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); /* remove NBASE-T speeds from default autonegotiation * to accommodate broken network switches in the field * which cannot cope with advertised NBASE-T speeds */ speed &= ~(IXGBE_LINK_SPEED_5GB_FULL | IXGBE_LINK_SPEED_2_5GB_FULL); } if (ret) return ret; if (hw->mac.ops.setup_link) ret = hw->mac.ops.setup_link(hw, speed, link_up); return ret; } /** * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset * @adapter: board private structure * * On a reset we need to clear out the VF stats or accounting gets * messed up because they're not clear on read. **/ static void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int i; for (i = 0; i < adapter->num_vfs; i++) { adapter->vfinfo[i].last_vfstats.gprc = IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i)); adapter->vfinfo[i].saved_rst_vfstats.gprc += adapter->vfinfo[i].vfstats.gprc; adapter->vfinfo[i].vfstats.gprc = 0; adapter->vfinfo[i].last_vfstats.gptc = IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i)); adapter->vfinfo[i].saved_rst_vfstats.gptc += adapter->vfinfo[i].vfstats.gptc; adapter->vfinfo[i].vfstats.gptc = 0; adapter->vfinfo[i].last_vfstats.gorc = IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i)); adapter->vfinfo[i].saved_rst_vfstats.gorc += adapter->vfinfo[i].vfstats.gorc; adapter->vfinfo[i].vfstats.gorc = 0; adapter->vfinfo[i].last_vfstats.gotc = IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i)); adapter->vfinfo[i].saved_rst_vfstats.gotc += adapter->vfinfo[i].vfstats.gotc; adapter->vfinfo[i].vfstats.gotc = 0; adapter->vfinfo[i].last_vfstats.mprc = IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i)); adapter->vfinfo[i].saved_rst_vfstats.mprc += adapter->vfinfo[i].vfstats.mprc; adapter->vfinfo[i].vfstats.mprc = 0; } } static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 gpie = 0; if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD; gpie |= IXGBE_GPIE_EIAME; /* * use EIAM to auto-mask when MSI-X interrupt is asserted * this saves a register write for every interrupt */ switch (hw->mac.type) { case ixgbe_mac_82598EB: IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: default: IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); break; } } else { /* legacy interrupts, use EIAM to auto-mask when reading EICR, * specifically only auto mask tx and rx interrupts */ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); } /* XXX: to interrupt immediately for EICS writes, enable this */ /* gpie |= IXGBE_GPIE_EIMEN; */ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { gpie &= 
~IXGBE_GPIE_VTMODE_MASK; switch (adapter->ring_feature[RING_F_VMDQ].mask) { case IXGBE_82599_VMDQ_8Q_MASK: gpie |= IXGBE_GPIE_VTMODE_16; break; case IXGBE_82599_VMDQ_4Q_MASK: gpie |= IXGBE_GPIE_VTMODE_32; break; default: gpie |= IXGBE_GPIE_VTMODE_64; break; } } /* Enable Thermal over heat sensor interrupt */ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: gpie |= IXGBE_SDP0_GPIEN_8259X; break; default: break; } } /* Enable fan failure interrupt */ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) gpie |= IXGBE_SDP1_GPIEN(hw); switch (hw->mac.type) { case ixgbe_mac_82599EB: gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X; break; case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: gpie |= IXGBE_SDP0_GPIEN_X540; break; default: break; } IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); } static void ixgbe_up_complete(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int err; u32 ctrl_ext; ixgbe_get_hw_control(adapter); ixgbe_setup_gpie(adapter); if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ixgbe_configure_msix(adapter); else ixgbe_configure_msi_and_legacy(adapter); /* enable the optics for 82599 SFP+ fiber */ if (hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw); if (hw->phy.ops.set_phy_power) hw->phy.ops.set_phy_power(hw, true); smp_mb__before_atomic(); clear_bit(__IXGBE_DOWN, &adapter->state); ixgbe_napi_enable_all(adapter); if (ixgbe_is_sfp(hw)) { ixgbe_sfp_link_config(adapter); } else { err = ixgbe_non_sfp_link_config(hw); if (err) e_err(probe, "link_config FAILED %d\n", err); } /* clear any pending interrupts, may auto mask */ IXGBE_READ_REG(hw, IXGBE_EICR); ixgbe_irq_enable(adapter, true, true); /* * If this adapter has a fan, check to see if we had a failure * before we enabled the interrupt. */ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); if (esdp & IXGBE_ESDP_SDP1) e_crit(drv, "Fan has stopped, replace the adapter\n"); } /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; adapter->link_check_timeout = jiffies; mod_timer(&adapter->service_timer, jiffies); ixgbe_clear_vf_stats_counters(adapter); /* Set PF Reset Done bit so PF/VF Mail Ops can work */ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); /* update setting rx tx for all active vfs */ ixgbe_set_all_vfs(adapter); } void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) { /* put off any impending NetWatchDogTimeout */ netif_trans_update(adapter->netdev); while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) usleep_range(1000, 2000); if (adapter->hw.phy.type == ixgbe_phy_fw) ixgbe_watchdog_link_is_down(adapter); ixgbe_down(adapter); /* * If SR-IOV enabled then wait a bit before bringing the adapter * back up to give the VFs time to respond to the reset. The * two second wait is based upon the watchdog timer cycle in * the VF driver. 
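 * Put differently, the msleep(2000) below gives each VF at least one
 * full watchdog pass to observe the PF reset before the PF begins
 * bringing its own queues back up.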
*/ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) msleep(2000); ixgbe_up(adapter); clear_bit(__IXGBE_RESETTING, &adapter->state); } void ixgbe_up(struct ixgbe_adapter *adapter) { /* hardware has been reset, we need to reload some things */ ixgbe_configure(adapter); ixgbe_up_complete(adapter); } static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter) { u16 devctl2; pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2); switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) { case IXGBE_PCIDEVCTRL2_17_34s: case IXGBE_PCIDEVCTRL2_4_8s: /* For now we cap the upper limit on delay to 2 seconds * as we end up going up to 34 seconds of delay in worst * case timeout value. */ case IXGBE_PCIDEVCTRL2_1_2s: return 2000000ul; /* 2.0 s */ case IXGBE_PCIDEVCTRL2_260_520ms: return 520000ul; /* 520 ms */ case IXGBE_PCIDEVCTRL2_65_130ms: return 130000ul; /* 130 ms */ case IXGBE_PCIDEVCTRL2_16_32ms: return 32000ul; /* 32 ms */ case IXGBE_PCIDEVCTRL2_1_2ms: return 2000ul; /* 2 ms */ case IXGBE_PCIDEVCTRL2_50_100us: return 100ul; /* 100 us */ case IXGBE_PCIDEVCTRL2_16_32ms_def: return 32000ul; /* 32 ms */ default: break; } /* We shouldn't need to hit this path, but just in case default as * though completion timeout is not supported and support 32ms. */ return 32000ul; } void ixgbe_disable_rx(struct ixgbe_adapter *adapter) { unsigned long wait_delay, delay_interval; struct ixgbe_hw *hw = &adapter->hw; int i, wait_loop; u32 rxdctl; /* disable receives */ hw->mac.ops.disable_rx(hw); if (ixgbe_removed(hw->hw_addr)) return; /* disable all enabled Rx queues */ for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; u8 reg_idx = ring->reg_idx; rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); rxdctl &= ~IXGBE_RXDCTL_ENABLE; rxdctl |= IXGBE_RXDCTL_SWFLSH; /* write value back with RXDCTL.ENABLE bit cleared */ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); } /* RXDCTL.EN may not change on 82598 if link is down, so skip it */ if (hw->mac.type == ixgbe_mac_82598EB && !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) return; /* Determine our minimum delay interval. We will increase this value * with each subsequent test. This way if the device returns quickly * we should spend as little time as possible waiting, however as * the time increases we will wait for larger periods of time. * * The trick here is that we increase the interval using the * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result * of that wait is that it totals up to 100x whatever interval we * choose. Since our minimum wait is 100us we can just divide the * total timeout by 100 to get our minimum delay interval. */ delay_interval = ixgbe_get_completion_timeout(adapter) / 100; wait_loop = IXGBE_MAX_RX_DESC_POLL; wait_delay = delay_interval; while (wait_loop--) { usleep_range(wait_delay, wait_delay + 10); wait_delay += delay_interval * 2; rxdctl = 0; /* OR together the reading of all the active RXDCTL registers, * and then test the result. We need the disable to complete * before we start freeing the memory and invalidating the * DMA mappings. 
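 *
 * Worked example: with a 2 s PCIe completion timeout,
 * delay_interval = 2000000 / 100 = 20000 us, and the ten waits
 * 1x + 3x + ... + 19x add up to 100x, i.e. the full 2 s budget.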
*/ for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; u8 reg_idx = ring->reg_idx; rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); } if (!(rxdctl & IXGBE_RXDCTL_ENABLE)) return; } e_err(drv, "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n"); } void ixgbe_disable_tx(struct ixgbe_adapter *adapter) { unsigned long wait_delay, delay_interval; struct ixgbe_hw *hw = &adapter->hw; int i, wait_loop; u32 txdctl; if (ixgbe_removed(hw->hw_addr)) return; /* disable all enabled Tx queues */ for (i = 0; i < adapter->num_tx_queues; i++) { struct ixgbe_ring *ring = adapter->tx_ring[i]; u8 reg_idx = ring->reg_idx; IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); } /* disable all enabled XDP Tx queues */ for (i = 0; i < adapter->num_xdp_queues; i++) { struct ixgbe_ring *ring = adapter->xdp_ring[i]; u8 reg_idx = ring->reg_idx; IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); } /* If the link is not up there shouldn't be much in the way of * pending transactions. Those that are left will be flushed out * when the reset logic goes through the flush sequence to clean out * the pending Tx transactions. */ if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) goto dma_engine_disable; /* Determine our minimum delay interval. We will increase this value * with each subsequent test. This way if the device returns quickly * we should spend as little time as possible waiting, however as * the time increases we will wait for larger periods of time. * * The trick here is that we increase the interval using the * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result * of that wait is that it totals up to 100x whatever interval we * choose. Since our minimum wait is 100us we can just divide the * total timeout by 100 to get our minimum delay interval. */ delay_interval = ixgbe_get_completion_timeout(adapter) / 100; wait_loop = IXGBE_MAX_RX_DESC_POLL; wait_delay = delay_interval; while (wait_loop--) { usleep_range(wait_delay, wait_delay + 10); wait_delay += delay_interval * 2; txdctl = 0; /* OR together the reading of all the active TXDCTL registers, * and then test the result. We need the disable to complete * before we start freeing the memory and invalidating the * DMA mappings. 
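 * If any TXDCTL still reports ENABLE once the budget is exhausted we
 * log the failure and fall through to dma_engine_disable, which clears
 * DMATXCTL.TE on 82599 and later MACs regardless.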
*/ for (i = 0; i < adapter->num_tx_queues; i++) { struct ixgbe_ring *ring = adapter->tx_ring[i]; u8 reg_idx = ring->reg_idx; txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); } for (i = 0; i < adapter->num_xdp_queues; i++) { struct ixgbe_ring *ring = adapter->xdp_ring[i]; u8 reg_idx = ring->reg_idx; txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); } if (!(txdctl & IXGBE_TXDCTL_ENABLE)) goto dma_engine_disable; } e_err(drv, "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n"); dma_engine_disable: /* Disable the Tx DMA engine on 82599 and later MAC */ switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & ~IXGBE_DMATXCTL_TE)); fallthrough; default: break; } } void ixgbe_reset(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; int err; if (ixgbe_removed(hw->hw_addr)) return; /* lock SFP init bit to prevent race conditions with the watchdog */ while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) usleep_range(1000, 2000); /* clear all SFP and link config related flags while holding SFP_INIT */ adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP | IXGBE_FLAG2_SFP_NEEDS_RESET); adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; err = hw->mac.ops.init_hw(hw); switch (err) { case 0: case IXGBE_ERR_SFP_NOT_PRESENT: case IXGBE_ERR_SFP_NOT_SUPPORTED: break; case IXGBE_ERR_PRIMARY_REQUESTS_PENDING: e_dev_err("primary disable timed out\n"); break; case IXGBE_ERR_EEPROM_VERSION: /* We are running on a pre-production device, log a warning */ e_dev_warn("This device is a pre-production adapter/LOM. " "Please be aware there may be issues associated with " "your hardware. 
If you are experiencing problems " "please contact your Intel or hardware " "representative who provided you with this " "hardware.\n"); break; default: e_dev_err("Hardware Error: %d\n", err); } clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); /* flush entries out of MAC table */ ixgbe_flush_sw_mac_table(adapter); __dev_uc_unsync(netdev, NULL); /* do not flush user set addresses */ ixgbe_mac_set_default_filter(adapter); /* update SAN MAC vmdq pool selection */ if (hw->mac.san_mac_rar_index) hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) ixgbe_ptp_reset(adapter); if (hw->phy.ops.set_phy_power) { if (!netif_running(adapter->netdev) && !adapter->wol) hw->phy.ops.set_phy_power(hw, false); else hw->phy.ops.set_phy_power(hw, true); } } /** * ixgbe_clean_tx_ring - Free Tx Buffers * @tx_ring: ring to be cleaned **/ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) { u16 i = tx_ring->next_to_clean; struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; if (tx_ring->xsk_pool) { ixgbe_xsk_clean_tx_ring(tx_ring); goto out; } while (i != tx_ring->next_to_use) { union ixgbe_adv_tx_desc *eop_desc, *tx_desc; /* Free all the Tx ring sk_buffs */ if (ring_is_xdp(tx_ring)) xdp_return_frame(tx_buffer->xdpf); else dev_kfree_skb_any(tx_buffer->skb); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); /* check for eop_desc to determine the end of the packet */ eop_desc = tx_buffer->next_to_watch; tx_desc = IXGBE_TX_DESC(tx_ring, i); /* unmap remaining buffers */ while (tx_desc != eop_desc) { tx_buffer++; tx_desc++; i++; if (unlikely(i == tx_ring->count)) { i = 0; tx_buffer = tx_ring->tx_buffer_info; tx_desc = IXGBE_TX_DESC(tx_ring, 0); } /* unmap any remaining paged data */ if (dma_unmap_len(tx_buffer, len)) dma_unmap_page(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); } /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; i++; if (unlikely(i == tx_ring->count)) { i = 0; tx_buffer = tx_ring->tx_buffer_info; } } /* reset BQL for queue */ if (!ring_is_xdp(tx_ring)) netdev_tx_reset_queue(txring_txq(tx_ring)); out: /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; } /** * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues * @adapter: board private structure **/ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) { int i; for (i = 0; i < adapter->num_rx_queues; i++) ixgbe_clean_rx_ring(adapter->rx_ring[i]); } /** * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues * @adapter: board private structure **/ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) ixgbe_clean_tx_ring(adapter->tx_ring[i]); for (i = 0; i < adapter->num_xdp_queues; i++) ixgbe_clean_tx_ring(adapter->xdp_ring[i]); } static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) { struct hlist_node *node2; struct ixgbe_fdir_filter *filter; spin_lock(&adapter->fdir_perfect_lock); hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { hlist_del(&filter->fdir_node); kfree(filter); } adapter->fdir_filter_count = 0; spin_unlock(&adapter->fdir_perfect_lock); } void ixgbe_down(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; int i; /* signal that we are down to the interrupt handler */ if 
(test_and_set_bit(__IXGBE_DOWN, &adapter->state)) return; /* do nothing if already down */ /* Shut off incoming Tx traffic */ netif_tx_stop_all_queues(netdev); /* call carrier off first to avoid false dev_watchdog timeouts */ netif_carrier_off(netdev); netif_tx_disable(netdev); /* Disable Rx */ ixgbe_disable_rx(adapter); /* synchronize_rcu() needed for pending XDP buffers to drain */ if (adapter->xdp_ring[0]) synchronize_rcu(); ixgbe_irq_disable(adapter); ixgbe_napi_disable_all(adapter); clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state); adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; del_timer_sync(&adapter->service_timer); if (adapter->num_vfs) { /* Clear EITR Select mapping */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); /* Mark all the VFs as inactive */ for (i = 0 ; i < adapter->num_vfs; i++) adapter->vfinfo[i].clear_to_send = false; /* update setting rx tx for all active vfs */ ixgbe_set_all_vfs(adapter); } /* disable transmits in the hardware now that interrupts are off */ ixgbe_disable_tx(adapter); if (!pci_channel_offline(adapter->pdev)) ixgbe_reset(adapter); /* power down the optics for 82599 SFP+ fiber */ if (hw->mac.ops.disable_tx_laser) hw->mac.ops.disable_tx_laser(hw); ixgbe_clean_all_tx_rings(adapter); ixgbe_clean_all_rx_rings(adapter); } /** * ixgbe_set_eee_capable - helper function to determine EEE support on X550 * @adapter: board private structure */ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: if (!hw->phy.eee_speeds_supported) break; adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE; if (!hw->phy.eee_speeds_advertised) break; adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; break; default: adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE; adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; break; } } /** * ixgbe_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * @txqueue: queue number that timed out **/ static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) { struct ixgbe_adapter *adapter = netdev_priv(netdev); /* Do the reset outside of interrupt context */ ixgbe_tx_timeout_reset(adapter); } #ifdef CONFIG_IXGBE_DCB static void ixgbe_init_dcb(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct tc_configuration *tc; int j; switch (hw->mac.type) { case ixgbe_mac_82598EB: case ixgbe_mac_82599EB: adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; break; case ixgbe_mac_X540: case ixgbe_mac_X550: adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; break; case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: default: adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS; adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS; break; } /* Configure DCB traffic classes */ for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { tc = &adapter->dcb_cfg.tc_config[j]; tc->path[DCB_TX_CONFIG].bwg_id = 0; tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); tc->path[DCB_RX_CONFIG].bwg_id = 0; tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); tc->dcb_pfc = pfc_disabled; } /* Initialize default user to priority mapping, UPx->TC0 */ tc = &adapter->dcb_cfg.tc_config[0]; tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; 
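	/* With every user priority mapped to TC0 by default
	 * (up_to_tc_bitmap = 0xFF above) and bandwidth group 0 given 100%
	 * of the link, all traffic effectively lands in TC0 until DCB is
	 * reconfigured.
	 */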
adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; adapter->dcb_cfg.pfc_mode_enable = false; adapter->dcb_set_bitmap = 0x00; if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, sizeof(adapter->temp_dcb_cfg)); } #endif /** * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) * @adapter: board private structure to initialize * @ii: pointer to ixgbe_info for device * * ixgbe_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). **/ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, const struct ixgbe_info *ii) { struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; unsigned int rss, fdir; u32 fwsm; int i; /* PCI config space info */ hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; hw->revision_id = pdev->revision; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; /* get_invariants needs the device IDs */ ii->get_invariants(hw); /* Set common capability flags and settings */ rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); adapter->ring_feature[RING_F_RSS].limit = rss; adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; adapter->max_q_vectors = MAX_Q_VECTORS_82599; adapter->atr_sample_rate = 20; fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); adapter->ring_feature[RING_F_FDIR].limit = fdir; adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; adapter->ring_feature[RING_F_VMDQ].limit = 1; #ifdef CONFIG_IXGBE_DCA adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; #endif #ifdef CONFIG_IXGBE_DCB adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; #endif #ifdef IXGBE_FCOE adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; #ifdef CONFIG_IXGBE_DCB /* Default traffic class to use for FCoE */ adapter->fcoe.up = IXGBE_FCOE_DEFTC; #endif /* CONFIG_IXGBE_DCB */ #endif /* IXGBE_FCOE */ /* initialize static ixgbe jump table entries */ adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]), GFP_KERNEL); if (!adapter->jump_tables[0]) return -ENOMEM; adapter->jump_tables[0]->mat = ixgbe_ipv4_fields; for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) adapter->jump_tables[i] = NULL; adapter->mac_table = kcalloc(hw->mac.num_rar_entries, sizeof(struct ixgbe_mac_addr), GFP_KERNEL); if (!adapter->mac_table) return -ENOMEM; if (ixgbe_init_rss_key(adapter)) return -ENOMEM; adapter->af_xdp_zc_qps = bitmap_zalloc(IXGBE_MAX_XDP_QS, GFP_KERNEL); if (!adapter->af_xdp_zc_qps) return -ENOMEM; /* Set MAC specific capability flags and exceptions */ switch (hw->mac.type) { case ixgbe_mac_82598EB: adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; if (hw->device_id == IXGBE_DEV_ID_82598AT) adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; adapter->max_q_vectors = MAX_Q_VECTORS_82598; adapter->ring_feature[RING_F_FDIR].limit = 0; adapter->atr_sample_rate = 0; adapter->fdir_pballoc = 0; #ifdef IXGBE_FCOE adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; #ifdef CONFIG_IXGBE_DCB adapter->fcoe.up = 0; #endif /* IXGBE_DCB */ #endif /* IXGBE_FCOE */ break; case ixgbe_mac_82599EB: if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; case ixgbe_mac_X540: fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); if (fwsm & IXGBE_FWSM_TS_ENABLED) adapter->flags2 |= 
IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; case ixgbe_mac_x550em_a: switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; default: break; } fallthrough; case ixgbe_mac_X550EM_x: #ifdef CONFIG_IXGBE_DCB adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; #endif #ifdef IXGBE_FCOE adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; #ifdef CONFIG_IXGBE_DCB adapter->fcoe.up = 0; #endif /* IXGBE_DCB */ #endif /* IXGBE_FCOE */ fallthrough; case ixgbe_mac_X550: if (hw->mac.type == ixgbe_mac_X550) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; #ifdef CONFIG_IXGBE_DCA adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; #endif break; default: break; } #ifdef IXGBE_FCOE /* FCoE support exists, always init the FCoE lock */ spin_lock_init(&adapter->fcoe.lock); #endif /* n-tuple support exists, always init our spinlock */ spin_lock_init(&adapter->fdir_perfect_lock); /* init spinlock to avoid concurrency of VF resources */ spin_lock_init(&adapter->vfs_lock); #ifdef CONFIG_IXGBE_DCB ixgbe_init_dcb(adapter); #endif ixgbe_init_ipsec_offload(adapter); /* default flow control settings */ hw->fc.requested_mode = ixgbe_fc_full; hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ ixgbe_pbthresh_setup(adapter); hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; hw->fc.send_xon = true; hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw); #ifdef CONFIG_PCI_IOV if (max_vfs > 0) e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n"); /* assign number of SR-IOV VFs */ if (hw->mac.type != ixgbe_mac_82598EB) { if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) { max_vfs = 0; e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n"); } } #endif /* CONFIG_PCI_IOV */ /* enable itr by default in dynamic mode */ adapter->rx_itr_setting = 1; adapter->tx_itr_setting = 1; /* set default ring sizes */ adapter->tx_ring_count = IXGBE_DEFAULT_TXD; adapter->rx_ring_count = IXGBE_DEFAULT_RXD; /* set default work limits */ adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; /* initialize eeprom parameters */ if (ixgbe_init_eeprom_params_generic(hw)) { e_dev_err("EEPROM initialization failed\n"); return -EIO; } /* PF holds first pool slot */ set_bit(0, adapter->fwd_bitmask); set_bit(__IXGBE_DOWN, &adapter->state); /* enable locking for XDP_TX if we have more CPUs than queues */ if (nr_cpu_ids > IXGBE_MAX_XDP_QS) static_branch_enable(&ixgbe_xdp_locking_key); return 0; } /** * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) * @tx_ring: tx descriptor ring (for a specific queue) to setup * * Return 0 on success, negative on failure **/ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) { struct device *dev = tx_ring->dev; int orig_node = dev_to_node(dev); int ring_node = NUMA_NO_NODE; int size; size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; if (tx_ring->q_vector) ring_node = tx_ring->q_vector->numa_node; tx_ring->tx_buffer_info = vmalloc_node(size, ring_node); if (!tx_ring->tx_buffer_info) tx_ring->tx_buffer_info = vmalloc(size); if (!tx_ring->tx_buffer_info) goto err; /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); set_dev_node(dev, ring_node); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); set_dev_node(dev, orig_node); if (!tx_ring->desc) tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, 
GFP_KERNEL); if (!tx_ring->desc) goto err; tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; return 0; err: vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); return -ENOMEM; } /** * ixgbe_setup_all_tx_resources - allocate all queues Tx resources * @adapter: board private structure * * If this function returns with an error, then it's possible one or * more of the rings is populated (while the rest are not). It is the * callers duty to clean those orphaned rings. * * Return 0 on success, negative on failure **/ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) { int i, j = 0, err = 0; for (i = 0; i < adapter->num_tx_queues; i++) { err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); if (!err) continue; e_err(probe, "Allocation for Tx Queue %u failed\n", i); goto err_setup_tx; } for (j = 0; j < adapter->num_xdp_queues; j++) { err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]); if (!err) continue; e_err(probe, "Allocation for Tx Queue %u failed\n", j); goto err_setup_tx; } return 0; err_setup_tx: /* rewind the index freeing the rings as we go */ while (j--) ixgbe_free_tx_resources(adapter->xdp_ring[j]); while (i--) ixgbe_free_tx_resources(adapter->tx_ring[i]); return err; } static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring) { struct ixgbe_q_vector *q_vector = rx_ring->q_vector; return q_vector ? q_vector->napi.napi_id : 0; } /** * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) * @adapter: pointer to ixgbe_adapter * @rx_ring: rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring) { struct device *dev = rx_ring->dev; int orig_node = dev_to_node(dev); int ring_node = NUMA_NO_NODE; int size; size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; if (rx_ring->q_vector) ring_node = rx_ring->q_vector->numa_node; rx_ring->rx_buffer_info = vmalloc_node(size, ring_node); if (!rx_ring->rx_buffer_info) rx_ring->rx_buffer_info = vmalloc(size); if (!rx_ring->rx_buffer_info) goto err; /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); set_dev_node(dev, ring_node); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); set_dev_node(dev, orig_node); if (!rx_ring->desc) rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) goto err; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; /* XDP RX-queue info */ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0) goto err; WRITE_ONCE(rx_ring->xdp_prog, adapter->xdp_prog); return 0; err: vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); return -ENOMEM; } /** * ixgbe_setup_all_rx_resources - allocate all queues Rx resources * @adapter: board private structure * * If this function returns with an error, then it's possible one or * more of the rings is populated (while the rest are not). It is the * callers duty to clean those orphaned rings. 
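 * When IXGBE_FCOE is defined, the FCoE DDP resources are allocated here as well.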
* * Return 0 on success, negative on failure **/ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) { int i, err = 0; for (i = 0; i < adapter->num_rx_queues; i++) { err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); if (!err) continue; e_err(probe, "Allocation for Rx Queue %u failed\n", i); goto err_setup_rx; } #ifdef IXGBE_FCOE err = ixgbe_setup_fcoe_ddp_resources(adapter); if (!err) #endif return 0; err_setup_rx: /* rewind the index freeing the rings as we go */ while (i--) ixgbe_free_rx_resources(adapter->rx_ring[i]); return err; } /** * ixgbe_free_tx_resources - Free Tx Resources per Queue * @tx_ring: Tx descriptor ring for a specific queue * * Free all transmit software resources **/ void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) { ixgbe_clean_tx_ring(tx_ring); vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; /* if not set, then don't free */ if (!tx_ring->desc) return; dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; } /** * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues * @adapter: board private structure * * Free all transmit software resources **/ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) if (adapter->tx_ring[i]->desc) ixgbe_free_tx_resources(adapter->tx_ring[i]); for (i = 0; i < adapter->num_xdp_queues; i++) if (adapter->xdp_ring[i]->desc) ixgbe_free_tx_resources(adapter->xdp_ring[i]); } /** * ixgbe_free_rx_resources - Free Rx Resources * @rx_ring: ring to clean the resources from * * Free all receive software resources **/ void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) { ixgbe_clean_rx_ring(rx_ring); rx_ring->xdp_prog = NULL; xdp_rxq_info_unreg(&rx_ring->xdp_rxq); vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; /* if not set, then don't free */ if (!rx_ring->desc) return; dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } /** * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues * @adapter: board private structure * * Free all receive software resources **/ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) { int i; #ifdef IXGBE_FCOE ixgbe_free_fcoe_ddp_resources(adapter); #endif for (i = 0; i < adapter->num_rx_queues; i++) if (adapter->rx_ring[i]->desc) ixgbe_free_rx_resources(adapter->rx_ring[i]); } /** * ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP * @adapter: device handle, pointer to adapter */ static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter) { if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) return IXGBE_RXBUFFER_2K; else return IXGBE_RXBUFFER_3K; } /** * ixgbe_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure **/ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (ixgbe_enabled_xdp_adapter(adapter)) { int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD; if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) { e_warn(probe, "Requested MTU size is not supported with XDP\n"); return -EINVAL; } } /* * For 82599EB we cannot allow legacy VFs to enable their receive * paths when MTU greater than 1500 is configured. So display a * warning that legacy VFs will be disabled. 
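 * The warning below is only emitted when SR-IOV is enabled and the new MTU exceeds the standard 1500 byte payload (ETH_DATA_LEN).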
*/ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (adapter->hw.mac.type == ixgbe_mac_82599EB) && (new_mtu > ETH_DATA_LEN)) e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; if (netif_running(netdev)) ixgbe_reinit_locked(adapter); return 0; } /** * ixgbe_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. **/ int ixgbe_open(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; int err, queues; /* disallow open during test */ if (test_bit(__IXGBE_TESTING, &adapter->state)) return -EBUSY; netif_carrier_off(netdev); /* allocate transmit descriptors */ err = ixgbe_setup_all_tx_resources(adapter); if (err) goto err_setup_tx; /* allocate receive descriptors */ err = ixgbe_setup_all_rx_resources(adapter); if (err) goto err_setup_rx; ixgbe_configure(adapter); err = ixgbe_request_irq(adapter); if (err) goto err_req_irq; /* Notify the stack of the actual queue counts. */ queues = adapter->num_tx_queues; err = netif_set_real_num_tx_queues(netdev, queues); if (err) goto err_set_queues; queues = adapter->num_rx_queues; err = netif_set_real_num_rx_queues(netdev, queues); if (err) goto err_set_queues; ixgbe_ptp_init(adapter); ixgbe_up_complete(adapter); udp_tunnel_nic_reset_ntf(netdev); return 0; err_set_queues: ixgbe_free_irq(adapter); err_req_irq: ixgbe_free_all_rx_resources(adapter); if (hw->phy.ops.set_phy_power && !adapter->wol) hw->phy.ops.set_phy_power(&adapter->hw, false); err_setup_rx: ixgbe_free_all_tx_resources(adapter); err_setup_tx: ixgbe_reset(adapter); return err; } static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) { ixgbe_ptp_suspend(adapter); if (adapter->hw.phy.ops.enter_lplu) { adapter->hw.phy.reset_disable = true; ixgbe_down(adapter); adapter->hw.phy.ops.enter_lplu(&adapter->hw); adapter->hw.phy.reset_disable = false; } else { ixgbe_down(adapter); } ixgbe_free_irq(adapter); ixgbe_free_all_tx_resources(adapter); ixgbe_free_all_rx_resources(adapter); } /** * ixgbe_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the drivers control, but * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. 
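 * The flow director filter list is also freed and hardware control is released.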
**/ int ixgbe_close(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); ixgbe_ptp_stop(adapter); if (netif_device_present(netdev)) ixgbe_close_suspend(adapter); ixgbe_fdir_filter_exit(adapter); ixgbe_release_hw_control(adapter); return 0; } static int __maybe_unused ixgbe_resume(struct device *dev_d) { struct pci_dev *pdev = to_pci_dev(dev_d); struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; u32 err; adapter->hw.hw_addr = adapter->io_addr; err = pci_enable_device_mem(pdev); if (err) { e_dev_err("Cannot enable PCI device from suspend\n"); return err; } smp_mb__before_atomic(); clear_bit(__IXGBE_DISABLED, &adapter->state); pci_set_master(pdev); device_wakeup_disable(dev_d); ixgbe_reset(adapter); IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); rtnl_lock(); err = ixgbe_init_interrupt_scheme(adapter); if (!err && netif_running(netdev)) err = ixgbe_open(netdev); if (!err) netif_device_attach(netdev); rtnl_unlock(); return err; } static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; u32 ctrl; u32 wufc = adapter->wol; rtnl_lock(); netif_device_detach(netdev); if (netif_running(netdev)) ixgbe_close_suspend(adapter); ixgbe_clear_interrupt_scheme(adapter); rtnl_unlock(); if (hw->mac.ops.stop_link_on_d3) hw->mac.ops.stop_link_on_d3(hw); if (wufc) { u32 fctrl; ixgbe_set_rx_mode(netdev); /* enable the optics for 82599 SFP+ fiber as we can WoL */ if (hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw); /* enable the reception of multicast packets */ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); fctrl |= IXGBE_FCTRL_MPE; IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); ctrl |= IXGBE_CTRL_GIO_DIS; IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc); } else { IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); } switch (hw->mac.type) { case ixgbe_mac_82598EB: pci_wake_from_d3(pdev, false); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: pci_wake_from_d3(pdev, !!wufc); break; default: break; } *enable_wake = !!wufc; if (hw->phy.ops.set_phy_power && !*enable_wake) hw->phy.ops.set_phy_power(hw, false); ixgbe_release_hw_control(adapter); if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) pci_disable_device(pdev); return 0; } static int __maybe_unused ixgbe_suspend(struct device *dev_d) { struct pci_dev *pdev = to_pci_dev(dev_d); int retval; bool wake; retval = __ixgbe_shutdown(pdev, &wake); device_set_wakeup_enable(dev_d, wake); return retval; } static void ixgbe_shutdown(struct pci_dev *pdev) { bool wake; __ixgbe_shutdown(pdev, &wake); if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, wake); pci_set_power_state(pdev, PCI_D3hot); } } /** * ixgbe_update_stats - Update the board statistics counters. 
* @adapter: board private structure **/ void ixgbe_update_stats(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw_stats *hwstats = &adapter->stats; u64 total_mpc = 0; u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; u64 alloc_rx_page = 0; u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; if (test_bit(__IXGBE_DOWN, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) return; if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { u64 rsc_count = 0; u64 rsc_flush = 0; for (i = 0; i < adapter->num_rx_queues; i++) { rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; } adapter->rsc_total_count = rsc_count; adapter->rsc_total_flush = rsc_flush; } for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]); if (!rx_ring) continue; non_eop_descs += rx_ring->rx_stats.non_eop_descs; alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; hw_csum_rx_error += rx_ring->rx_stats.csum_err; bytes += rx_ring->stats.bytes; packets += rx_ring->stats.packets; } adapter->non_eop_descs = non_eop_descs; adapter->alloc_rx_page = alloc_rx_page; adapter->alloc_rx_page_failed = alloc_rx_page_failed; adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; adapter->hw_csum_rx_error = hw_csum_rx_error; netdev->stats.rx_bytes = bytes; netdev->stats.rx_packets = packets; bytes = 0; packets = 0; /* gather some stats to the adapter struct that are per queue */ for (i = 0; i < adapter->num_tx_queues; i++) { struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]); if (!tx_ring) continue; restart_queue += tx_ring->tx_stats.restart_queue; tx_busy += tx_ring->tx_stats.tx_busy; bytes += tx_ring->stats.bytes; packets += tx_ring->stats.packets; } for (i = 0; i < adapter->num_xdp_queues; i++) { struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]); if (!xdp_ring) continue; restart_queue += xdp_ring->tx_stats.restart_queue; tx_busy += xdp_ring->tx_stats.tx_busy; bytes += xdp_ring->stats.bytes; packets += xdp_ring->stats.packets; } adapter->restart_queue = restart_queue; adapter->tx_busy = tx_busy; netdev->stats.tx_bytes = bytes; netdev->stats.tx_packets = packets; hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); /* 8 register reads */ for (i = 0; i < 8; i++) { /* for packet buffers not used, the register should read 0 */ mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); missed_rx += mpc; hwstats->mpc[i] += mpc; total_mpc += hwstats->mpc[i]; hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); switch (hw->mac.type) { case ixgbe_mac_82598EB: hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); hwstats->pxonrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: hwstats->pxonrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); break; default: break; } } /*16 register reads */ for (i = 0; i < 16; i++) { hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); if 
((hw->mac.type == ixgbe_mac_82599EB) || (hw->mac.type == ixgbe_mac_X540) || (hw->mac.type == ixgbe_mac_X550) || (hw->mac.type == ixgbe_mac_X550EM_x) || (hw->mac.type == ixgbe_mac_x550em_a)) { hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */ } } hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); /* work around hardware counting issue */ hwstats->gprc -= missed_rx; ixgbe_update_xoff_received(adapter); /* 82598 hardware only has a 32 bit counter in the high register */ switch (hw->mac.type) { case ixgbe_mac_82598EB: hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); break; case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: /* OS2BMC stats are X540 and later */ hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); fallthrough; case ixgbe_mac_82599EB: for (i = 0; i < 16; i++) adapter->hw_rx_no_dma_resources += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); #ifdef IXGBE_FCOE hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); /* Add up per cpu counters for total ddp aloc fail */ if (adapter->fcoe.ddp_pool) { struct ixgbe_fcoe *fcoe = &adapter->fcoe; struct ixgbe_fcoe_ddp_pool *ddp_pool; unsigned int cpu; u64 noddp = 0, noddp_ext_buff = 0; for_each_possible_cpu(cpu) { ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); noddp += ddp_pool->noddp; noddp_ext_buff += ddp_pool->noddp_ext_buff; } hwstats->fcoe_noddp = noddp; hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; } #endif /* IXGBE_FCOE */ break; default: break; } bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); hwstats->bprc += bprc; hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); if (hw->mac.type == ixgbe_mac_82598EB) hwstats->mprc -= bprc; hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); hwstats->lxontxc += lxon; lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); hwstats->lxofftxc += lxoff; hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); /* * 82598 errata - tx of flow control packets 
is included in tx counters */ xon_off_tot = lxon + lxoff; hwstats->gptc -= xon_off_tot; hwstats->mptc -= xon_off_tot; hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); hwstats->ptc64 -= xon_off_tot; hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); /* Fill out the OS statistics structure */ netdev->stats.multicast = hwstats->mprc; /* Rx Errors */ netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; netdev->stats.rx_dropped = 0; netdev->stats.rx_length_errors = hwstats->rlec; netdev->stats.rx_crc_errors = hwstats->crcerrs; netdev->stats.rx_missed_errors = total_mpc; /* VF Stats Collection - skip while resetting because these * are not clear on read and otherwise you'll sometimes get * crazy values. */ if (!test_bit(__IXGBE_RESETTING, &adapter->state)) { for (i = 0; i < adapter->num_vfs; i++) { UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i), adapter->vfinfo[i].last_vfstats.gprc, adapter->vfinfo[i].vfstats.gprc); UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i), adapter->vfinfo[i].last_vfstats.gptc, adapter->vfinfo[i].vfstats.gptc); UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i), IXGBE_PVFGORC_MSB(i), adapter->vfinfo[i].last_vfstats.gorc, adapter->vfinfo[i].vfstats.gorc); UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i), IXGBE_PVFGOTC_MSB(i), adapter->vfinfo[i].last_vfstats.gotc, adapter->vfinfo[i].vfstats.gotc); UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i), adapter->vfinfo[i].last_vfstats.mprc, adapter->vfinfo[i].vfstats.mprc); } } } /** * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table * @adapter: pointer to the device adapter structure **/ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int i; if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) return; adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; /* if interface is down do nothing */ if (test_bit(__IXGBE_DOWN, &adapter->state)) return; /* do nothing if we are not using signature filters */ if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) return; adapter->fdir_overflow++; if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { for (i = 0; i < adapter->num_tx_queues; i++) set_bit(__IXGBE_TX_FDIR_INIT_DONE, &(adapter->tx_ring[i]->state)); for (i = 0; i < adapter->num_xdp_queues; i++) set_bit(__IXGBE_TX_FDIR_INIT_DONE, &adapter->xdp_ring[i]->state); /* re-enable flow director interrupts */ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); } else { e_err(probe, "failed to finish FDIR re-initialization, " "ignored adding FDIR ATR filters\n"); } } /** * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts * @adapter: pointer to the device adapter structure * * This function serves two purposes. First it strobes the interrupt lines * in order to make certain interrupts are occurring. Secondly it sets the * bits needed to check for TX hangs. As a result we should immediately * determine if a hang has occurred. 
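 * Rings are only flagged for the hang check while the netdev carrier is reported up.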
*/ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u64 eics = 0; int i; /* If we're down, removing or resetting, just bail */ if (test_bit(__IXGBE_DOWN, &adapter->state) || test_bit(__IXGBE_REMOVING, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) return; /* Force detection of hung controller */ if (netif_carrier_ok(adapter->netdev)) { for (i = 0; i < adapter->num_tx_queues; i++) set_check_for_tx_hang(adapter->tx_ring[i]); for (i = 0; i < adapter->num_xdp_queues; i++) set_check_for_tx_hang(adapter->xdp_ring[i]); } if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { /* * for legacy and MSI interrupts don't set any bits * that are enabled for EIAM, because this operation * would set *both* EIMS and EICS for any bit in EIAM */ IXGBE_WRITE_REG(hw, IXGBE_EICS, (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); } else { /* get one bit for every active tx/rx interrupt vector */ for (i = 0; i < adapter->num_q_vectors; i++) { struct ixgbe_q_vector *qv = adapter->q_vector[i]; if (qv->rx.ring || qv->tx.ring) eics |= BIT_ULL(i); } } /* Cause software interrupt to ensure rings are cleaned */ ixgbe_irq_rearm_queues(adapter, eics); } /** * ixgbe_watchdog_update_link - update the link status * @adapter: pointer to the device adapter structure **/ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 link_speed = adapter->link_speed; bool link_up = adapter->link_up; bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) return; if (hw->mac.ops.check_link) { hw->mac.ops.check_link(hw, &link_speed, &link_up, false); } else { /* always assume link is up, if no check link function */ link_speed = IXGBE_LINK_SPEED_10GB_FULL; link_up = true; } if (adapter->ixgbe_ieee_pfc) pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) { hw->mac.ops.fc_enable(hw); ixgbe_set_rx_drop_en(adapter); } if (link_up || time_after(jiffies, (adapter->link_check_timeout + IXGBE_TRY_LINK_TIMEOUT))) { adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); IXGBE_WRITE_FLUSH(hw); } adapter->link_up = link_up; adapter->link_speed = link_speed; } static void ixgbe_update_default_up(struct ixgbe_adapter *adapter) { #ifdef CONFIG_IXGBE_DCB struct net_device *netdev = adapter->netdev; struct dcb_app app = { .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE, .protocol = 0, }; u8 up = 0; if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) up = dcb_ieee_getapp_mask(netdev, &app); adapter->default_up = (up > 1) ? 
(ffs(up) - 1) : 0; #endif } /** * ixgbe_watchdog_link_is_up - update netif_carrier status and * print link up message * @adapter: pointer to the device adapter structure **/ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; u32 link_speed = adapter->link_speed; const char *speed_str; bool flow_rx, flow_tx; /* only continue if link was previously down */ if (netif_carrier_ok(netdev)) return; adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; switch (hw->mac.type) { case ixgbe_mac_82598EB: { u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); } break; case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: case ixgbe_mac_82599EB: { u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); } break; default: flow_tx = false; flow_rx = false; break; } adapter->last_rx_ptp_check = jiffies; if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) ixgbe_ptp_start_cyclecounter(adapter); switch (link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: speed_str = "10 Gbps"; break; case IXGBE_LINK_SPEED_5GB_FULL: speed_str = "5 Gbps"; break; case IXGBE_LINK_SPEED_2_5GB_FULL: speed_str = "2.5 Gbps"; break; case IXGBE_LINK_SPEED_1GB_FULL: speed_str = "1 Gbps"; break; case IXGBE_LINK_SPEED_100_FULL: speed_str = "100 Mbps"; break; case IXGBE_LINK_SPEED_10_FULL: speed_str = "10 Mbps"; break; default: speed_str = "unknown speed"; break; } e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str, ((flow_rx && flow_tx) ? "RX/TX" : (flow_rx ? "RX" : (flow_tx ? 
"TX" : "None")))); netif_carrier_on(netdev); ixgbe_check_vf_rate_limit(adapter); /* enable transmits */ netif_tx_wake_all_queues(adapter->netdev); /* update the default user priority for VFs */ ixgbe_update_default_up(adapter); /* ping all the active vfs to let them know link has changed */ ixgbe_ping_all_vfs(adapter); } /** * ixgbe_watchdog_link_is_down - update netif_carrier status and * print link down message * @adapter: pointer to the adapter structure **/ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; adapter->link_up = false; adapter->link_speed = 0; /* only continue if link was up previously */ if (!netif_carrier_ok(netdev)) return; /* poll for SFP+ cable when link is down */ if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) ixgbe_ptp_start_cyclecounter(adapter); e_info(drv, "NIC Link is Down\n"); netif_carrier_off(netdev); /* ping all the active vfs to let them know link has changed */ ixgbe_ping_all_vfs(adapter); } static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) { struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; if (tx_ring->next_to_use != tx_ring->next_to_clean) return true; } for (i = 0; i < adapter->num_xdp_queues; i++) { struct ixgbe_ring *ring = adapter->xdp_ring[i]; if (ring->next_to_use != ring->next_to_clean) return true; } return false; } static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); int i, j; if (!adapter->num_vfs) return false; /* resetting the PF is only needed for MAC before X550 */ if (hw->mac.type >= ixgbe_mac_X550) return false; for (i = 0; i < adapter->num_vfs; i++) { for (j = 0; j < q_per_pool; j++) { u32 h, t; h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j)); t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j)); if (h != t) return true; } } return false; } /** * ixgbe_watchdog_flush_tx - flush queues on link down * @adapter: pointer to the device adapter structure **/ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) { if (!netif_carrier_ok(adapter->netdev)) { if (ixgbe_ring_tx_pending(adapter) || ixgbe_vf_tx_pending(adapter)) { /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going * to get done, so reset controller to flush Tx. * (Do the reset outside of interrupt context). 
*/ e_warn(drv, "initiating reset to clear Tx work after link loss\n"); set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); } } } #ifdef CONFIG_PCI_IOV static void ixgbe_bad_vf_abort(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; if (adapter->hw.mac.type == ixgbe_mac_82599EB && adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) { adapter->vfinfo[vf].primary_abort_count++; if (adapter->vfinfo[vf].primary_abort_count == IXGBE_PRIMARY_ABORT_LIMIT) { ixgbe_set_vf_link_state(adapter, vf, IFLA_VF_LINK_STATE_DISABLE); adapter->vfinfo[vf].primary_abort_count = 0; e_info(drv, "Malicious Driver Detection event detected on PF %d VF %d MAC: %pM mdd-disable-vf=on", hw->bus.func, vf, adapter->vfinfo[vf].vf_mac_addresses); } } } static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; unsigned int vf; u32 gpc; if (!(netif_carrier_ok(adapter->netdev))) return; gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); if (gpc) /* If incrementing then no need for the check below */ return; /* Check to see if a bad DMA write target from an errant or * malicious VF has caused a PCIe error. If so then we can * issue a VFLR to the offending VF(s) and then resume without * requesting a full slot reset. */ if (!pdev) return; /* check status reg for all VFs owned by this PF */ for (vf = 0; vf < adapter->num_vfs; ++vf) { struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; u16 status_reg; if (!vfdev) continue; pci_read_config_word(vfdev, PCI_STATUS, &status_reg); if (status_reg != IXGBE_FAILED_READ_CFG_WORD && status_reg & PCI_STATUS_REC_MASTER_ABORT) { ixgbe_bad_vf_abort(adapter, vf); pcie_flr(vfdev); } } } static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) { u32 ssvpc; /* Do not perform spoof check for 82598 or if not in IOV mode */ if (adapter->hw.mac.type == ixgbe_mac_82598EB || adapter->num_vfs == 0) return; ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); /* * ssvpc register is cleared on read, if zero then no * spoofed packets in the last interval. 
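 * Each warning below therefore covers only the spoofed packets counted since the register was last read.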
*/ if (!ssvpc) return; e_warn(drv, "%u Spoofed packets detected\n", ssvpc); } #else static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter) { } static void ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter) { } #endif /* CONFIG_PCI_IOV */ /** * ixgbe_watchdog_subtask - check and bring link up * @adapter: pointer to the device adapter structure **/ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) { /* if interface is down, removing or resetting, do nothing */ if (test_bit(__IXGBE_DOWN, &adapter->state) || test_bit(__IXGBE_REMOVING, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) return; ixgbe_watchdog_update_link(adapter); if (adapter->link_up) ixgbe_watchdog_link_is_up(adapter); else ixgbe_watchdog_link_is_down(adapter); ixgbe_check_for_bad_vf(adapter); ixgbe_spoof_check(adapter); ixgbe_update_stats(adapter); ixgbe_watchdog_flush_tx(adapter); } /** * ixgbe_sfp_detection_subtask - poll for SFP+ cable * @adapter: the ixgbe adapter structure **/ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; s32 err; /* not searching for SFP so there is nothing to do here */ if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) return; if (adapter->sfp_poll_time && time_after(adapter->sfp_poll_time, jiffies)) return; /* If not yet time to poll for SFP */ /* someone else is in init, wait until next service event */ if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) return; adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1; err = hw->phy.ops.identify_sfp(hw); if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) goto sfp_out; if (err == IXGBE_ERR_SFP_NOT_PRESENT) { /* If no cable is present, then we need to reset * the next time we find a good cable. */ adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; } /* exit on error */ if (err) goto sfp_out; /* exit if reset not needed */ if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) goto sfp_out; adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET; /* * A module may be identified correctly, but the EEPROM may not have * support for that module. setup_sfp() will fail in that case, so * we should not allow that module to load. 
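 * In that case the error path below logs the failure and, if the netdev is already registered, unregisters it.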
*/ if (hw->mac.type == ixgbe_mac_82598EB) err = hw->phy.ops.reset(hw); else err = hw->mac.ops.setup_sfp(hw); if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) goto sfp_out; adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); sfp_out: clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && (adapter->netdev->reg_state == NETREG_REGISTERED)) { e_dev_err("failed to initialize because an unsupported " "SFP+ module type was detected.\n"); e_dev_err("Reload the driver after installing a " "supported module.\n"); unregister_netdev(adapter->netdev); } } /** * ixgbe_sfp_link_config_subtask - set up link SFP after module install * @adapter: the ixgbe adapter structure **/ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 cap_speed; u32 speed; bool autoneg = false; if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) return; /* someone else is in init, wait until next service event */ if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) return; adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg); /* advertise highest capable link speed */ if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL)) speed = IXGBE_LINK_SPEED_10GB_FULL; else speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL); if (hw->mac.ops.setup_link) hw->mac.ops.setup_link(hw, speed, true); adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; adapter->link_check_timeout = jiffies; clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); } /** * ixgbe_service_timer - Timer Call-back * @t: pointer to timer_list structure **/ static void ixgbe_service_timer(struct timer_list *t) { struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer); unsigned long next_event_offset; /* poll faster when waiting for link */ if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) next_event_offset = HZ / 10; else next_event_offset = HZ * 2; /* Reset the timer */ mod_timer(&adapter->service_timer, next_event_offset + jiffies); ixgbe_service_event_schedule(adapter); } static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 status; if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT)) return; adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT; if (!hw->phy.ops.handle_lasi) return; status = hw->phy.ops.handle_lasi(&adapter->hw); if (status != IXGBE_ERR_OVERTEMP) return; e_crit(drv, "%s\n", ixgbe_overheat_msg); } static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) { if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state)) return; rtnl_lock(); /* If we're already down, removing or resetting, just bail */ if (test_bit(__IXGBE_DOWN, &adapter->state) || test_bit(__IXGBE_REMOVING, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) { rtnl_unlock(); return; } ixgbe_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); adapter->tx_timeout_count++; ixgbe_reinit_locked(adapter); rtnl_unlock(); } /** * ixgbe_check_fw_error - Check firmware for errors * @adapter: the adapter private structure * * Check firmware errors in register FWSM */ static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 fwsm; /* read fwsm.ext_err_ind register and log errors */ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK || !(fwsm & IXGBE_FWSM_FW_VAL_BIT)) e_dev_warn("Warning firmware error detected 
FWSM: 0x%08X\n", fwsm); if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); return true; } return false; } /** * ixgbe_service_task - manages and runs subtasks * @work: pointer to work_struct containing our data **/ static void ixgbe_service_task(struct work_struct *work) { struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter, service_task); if (ixgbe_removed(adapter->hw.hw_addr)) { if (!test_bit(__IXGBE_DOWN, &adapter->state)) { rtnl_lock(); ixgbe_down(adapter); rtnl_unlock(); } ixgbe_service_event_complete(adapter); return; } if (ixgbe_check_fw_error(adapter)) { if (!test_bit(__IXGBE_DOWN, &adapter->state)) unregister_netdev(adapter->netdev); ixgbe_service_event_complete(adapter); return; } ixgbe_reset_subtask(adapter); ixgbe_phy_interrupt_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); ixgbe_sfp_link_config_subtask(adapter); ixgbe_check_overtemp_subtask(adapter); ixgbe_watchdog_subtask(adapter); ixgbe_fdir_reinit_subtask(adapter); ixgbe_check_hang_subtask(adapter); if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { ixgbe_ptp_overflow_check(adapter); if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER) ixgbe_ptp_rx_hang(adapter); ixgbe_ptp_tx_hang(adapter); } ixgbe_service_event_complete(adapter); } static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, u8 *hdr_len, struct ixgbe_ipsec_tx_data *itd) { u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = first->skb; union { struct iphdr *v4; struct ipv6hdr *v6; unsigned char *hdr; } ip; union { struct tcphdr *tcp; struct udphdr *udp; unsigned char *hdr; } l4; u32 paylen, l4_offset; u32 fceof_saidx = 0; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; if (!skb_is_gso(skb)) return 0; err = skb_cow_head(skb, 0); if (err < 0) return err; if (eth_p_mpls(first->protocol)) ip.hdr = skb_inner_network_header(skb); else ip.hdr = skb_network_header(skb); l4.hdr = skb_checksum_start(skb); /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP; /* initialize outer IP header fields */ if (ip.v4->version == 4) { unsigned char *csum_start = skb_checksum_start(skb); unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); int len = csum_start - trans_start; /* IP header will have to cancel out any data that * is not a part of the outer IP header, so set to * a reverse csum if needed, else init check to 0. */ ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? 
csum_fold(csum_partial(trans_start, len, 0)) : 0; type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; ip.v4->tot_len = 0; first->tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM | IXGBE_TX_FLAGS_IPV4; } else { ip.v6->payload_len = 0; first->tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; } /* determine offset of inner transport header */ l4_offset = l4.hdr - skb->data; /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) { /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); } else { /* compute length of segmentation header */ *hdr_len = sizeof(*l4.udp) + l4_offset; csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen)); } /* update gso size and bytecount with header size */ first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; /* mss_l4len_id: use 0 as index for TSO */ mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; fceof_saidx |= itd->sa_idx; type_tucmd |= itd->flags | itd->trailer_len; /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = l4.hdr - ip.hdr; vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, mss_l4len_idx); return 1; } static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, struct ixgbe_ipsec_tx_data *itd) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; u32 fceof_saidx = 0; u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { csum_failed: if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_CC))) return; goto no_csum; } switch (skb->csum_offset) { case offsetof(struct tcphdr, check): type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; fallthrough; case offsetof(struct udphdr, check): break; case offsetof(struct sctphdr, checksum): /* validate that this is actually an SCTP request */ if (skb_csum_is_sctp(skb)) { type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; break; } fallthrough; default: skb_checksum_help(skb); goto csum_failed; } /* update TX checksum flag */ first->tx_flags |= IXGBE_TX_FLAGS_CSUM; vlan_macip_lens = skb_checksum_start_offset(skb) - skb_network_offset(skb); no_csum: /* vlan_macip_lens: MACLEN, VLAN tag */ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; fceof_saidx |= itd->sa_idx; type_tucmd |= itd->flags | itd->trailer_len; ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0); } #define IXGBE_SET_FLAG(_input, _flag, _result) \ ((_flag <= _result) ? 
\ ((u32)(_input & _flag) * (_result / _flag)) : \ ((u32)(_input & _flag) / (_flag / _result))) static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) { /* set type for advanced descriptor with frame checksum insertion */ u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DCMD_IFCS; /* set HW vlan bit if vlan is present */ cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN, IXGBE_ADVTXD_DCMD_VLE); /* set segmentation enable bits for TSO/FSO */ cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO, IXGBE_ADVTXD_DCMD_TSE); /* set timestamp bit if present */ cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP, IXGBE_ADVTXD_MAC_TSTAMP); /* insert frame checksum */ cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS); return cmd_type; } static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, u32 tx_flags, unsigned int paylen) { u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; /* enable L4 checksum for TSO and TX checksum offload */ olinfo_status |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_CSUM, IXGBE_ADVTXD_POPTS_TXSM); /* enable IPv4 checksum for TSO */ olinfo_status |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_IPV4, IXGBE_ADVTXD_POPTS_IXSM); /* enable IPsec */ olinfo_status |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_IPSEC, IXGBE_ADVTXD_POPTS_IPSEC); /* * Check Context must be set if Tx switch is enabled, which it * always is for case where virtual functions are running */ olinfo_status |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_CC, IXGBE_ADVTXD_CC); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); } static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) { if (!netif_subqueue_try_stop(tx_ring->netdev, tx_ring->queue_index, ixgbe_desc_unused(tx_ring), size)) return -EBUSY; ++tx_ring->tx_stats.restart_queue; return 0; } static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) { if (likely(ixgbe_desc_unused(tx_ring) >= size)) return 0; return __ixgbe_maybe_stop_tx(tx_ring, size); } static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, const u8 hdr_len) { struct sk_buff *skb = first->skb; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; skb_frag_t *frag; dma_addr_t dma; unsigned int data_len, size; u32 tx_flags = first->tx_flags; u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags); u16 i = tx_ring->next_to_use; tx_desc = IXGBE_TX_DESC(tx_ring, i); ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); size = skb_headlen(skb); data_len = skb->data_len; #ifdef IXGBE_FCOE if (tx_flags & IXGBE_TX_FLAGS_FCOE) { if (data_len < sizeof(struct fcoe_crc_eof)) { size -= sizeof(struct fcoe_crc_eof) - data_len; data_len = 0; } else { data_len -= sizeof(struct fcoe_crc_eof); } } #endif dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); tx_buffer = first; for (frag = &skb_shinfo(skb)->frags[0];; frag++) { if (dma_mapping_error(tx_ring->dev, dma)) goto dma_error; /* record length, and DMA address */ dma_unmap_len_set(tx_buffer, len, size); dma_unmap_addr_set(tx_buffer, dma, dma); tx_desc->read.buffer_addr = cpu_to_le64(dma); while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD); i++; tx_desc++; if (i == tx_ring->count) { tx_desc = IXGBE_TX_DESC(tx_ring, 0); i = 0; } tx_desc->read.olinfo_status = 0; dma += IXGBE_MAX_DATA_PER_TXD; size -= IXGBE_MAX_DATA_PER_TXD; tx_desc->read.buffer_addr = cpu_to_le64(dma); } if (likely(!data_len)) break; 
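/* more fragments remain: write out the current descriptor, then DMA map
 * the next fragment before continuing the loop
 */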
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); i++; tx_desc++; if (i == tx_ring->count) { tx_desc = IXGBE_TX_DESC(tx_ring, 0); i = 0; } tx_desc->read.olinfo_status = 0; #ifdef IXGBE_FCOE size = min_t(unsigned int, data_len, skb_frag_size(frag)); #else size = skb_frag_size(frag); #endif data_len -= size; dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, DMA_TO_DEVICE); tx_buffer = &tx_ring->tx_buffer_info[i]; } /* write last descriptor with RS and EOP bits */ cmd_type |= size | IXGBE_TXD_CMD; tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); /* set the timestamp */ first->time_stamp = jiffies; skb_tx_timestamp(skb); /* * Force memory writes to complete before letting h/w know there * are new descriptors to fetch. (Only applicable for weak-ordered * memory model archs, such as IA-64). * * We also need this memory barrier to make certain all of the * status bits have been updated before next_to_watch is written. */ wmb(); /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; i++; if (i == tx_ring->count) i = 0; tx_ring->next_to_use = i; ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); } return 0; dma_error: dev_err(tx_ring->dev, "TX DMA map failed\n"); /* clear dma mappings for failed tx_buffer_info map */ for (;;) { tx_buffer = &tx_ring->tx_buffer_info[i]; if (dma_unmap_len(tx_buffer, len)) dma_unmap_page(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); dma_unmap_len_set(tx_buffer, len, 0); if (tx_buffer == first) break; if (i == 0) i += tx_ring->count; i--; } dev_kfree_skb_any(first->skb); first->skb = NULL; tx_ring->next_to_use = i; return -1; } static void ixgbe_atr(struct ixgbe_ring *ring, struct ixgbe_tx_buffer *first) { struct ixgbe_q_vector *q_vector = ring->q_vector; union ixgbe_atr_hash_dword input = { .dword = 0 }; union ixgbe_atr_hash_dword common = { .dword = 0 }; union { unsigned char *network; struct iphdr *ipv4; struct ipv6hdr *ipv6; } hdr; struct tcphdr *th; unsigned int hlen; struct sk_buff *skb; __be16 vlan_id; int l4_proto; /* if ring doesn't have a interrupt vector, cannot perform ATR */ if (!q_vector) return; /* do nothing if sampling is disabled */ if (!ring->atr_sample_rate) return; ring->atr_count++; /* currently only IPv4/IPv6 with TCP is supported */ if ((first->protocol != htons(ETH_P_IP)) && (first->protocol != htons(ETH_P_IPV6))) return; /* snag network header to get L4 type and address */ skb = first->skb; hdr.network = skb_network_header(skb); if (unlikely(hdr.network <= skb->data)) return; if (skb->encapsulation && first->protocol == htons(ETH_P_IP) && hdr.ipv4->protocol == IPPROTO_UDP) { struct ixgbe_adapter *adapter = q_vector->adapter; if (unlikely(skb_tail_pointer(skb) < hdr.network + vxlan_headroom(0))) return; /* verify the port is recognized as VXLAN */ if (adapter->vxlan_port && udp_hdr(skb)->dest == adapter->vxlan_port) hdr.network = skb_inner_network_header(skb); if (adapter->geneve_port && udp_hdr(skb)->dest == adapter->geneve_port) hdr.network = skb_inner_network_header(skb); } /* Make sure we have at least [minimum IPv4 header + TCP] * or [IPv6 header] bytes */ if (unlikely(skb_tail_pointer(skb) < hdr.network + 40)) return; /* Currently only IPv4/IPv6 with TCP is supported */ switch (hdr.ipv4->version) { case IPVERSION: /* access ihl as u8 to avoid unaligned access on ia64 */ hlen = (hdr.network[0] & 
0x0F) << 2; l4_proto = hdr.ipv4->protocol; break; case 6: hlen = hdr.network - skb->data; l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL); hlen -= hdr.network - skb->data; break; default: return; } if (l4_proto != IPPROTO_TCP) return; if (unlikely(skb_tail_pointer(skb) < hdr.network + hlen + sizeof(struct tcphdr))) return; th = (struct tcphdr *)(hdr.network + hlen); /* skip this packet since the socket is closing */ if (th->fin) return; /* sample on all syn packets or once every atr sample count */ if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) return; /* reset sample count */ ring->atr_count = 0; vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); /* * src and dst are inverted, think how the receiver sees them * * The input is broken into two sections, a non-compressed section * containing vm_pool, vlan_id, and flow_type. The rest of the data * is XORed together and stored in the compressed dword. */ input.formatted.vlan_id = vlan_id; /* * since src port and flex bytes occupy the same word XOR them together * and write the value to source port portion of compressed dword */ if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN)) common.port.src ^= th->dest ^ htons(ETH_P_8021Q); else common.port.src ^= th->dest ^ first->protocol; common.port.dst ^= th->source; switch (hdr.ipv4->version) { case IPVERSION: input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; break; case 6: input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ hdr.ipv6->saddr.s6_addr32[1] ^ hdr.ipv6->saddr.s6_addr32[2] ^ hdr.ipv6->saddr.s6_addr32[3] ^ hdr.ipv6->daddr.s6_addr32[0] ^ hdr.ipv6->daddr.s6_addr32[1] ^ hdr.ipv6->daddr.s6_addr32[2] ^ hdr.ipv6->daddr.s6_addr32[3]; break; default: break; } if (hdr.network != skb_network_header(skb)) input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK; /* This assumes the Rx queue and Tx queue are bound to the same CPU */ ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, input, common, ring->queue_index); } #ifdef IXGBE_FCOE static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { struct ixgbe_adapter *adapter; struct ixgbe_ring_feature *f; int txq; if (sb_dev) { u8 tc = netdev_get_prio_tc_map(dev, skb->priority); struct net_device *vdev = sb_dev; txq = vdev->tc_to_txq[tc].offset; txq += reciprocal_scale(skb_get_hash(skb), vdev->tc_to_txq[tc].count); return txq; } /* * only execute the code below if protocol is FCoE * or FIP and we have FCoE enabled on the adapter */ switch (vlan_get_protocol(skb)) { case htons(ETH_P_FCOE): case htons(ETH_P_FIP): adapter = netdev_priv(dev); if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) break; fallthrough; default: return netdev_pick_tx(dev, skb, sb_dev); } f = &adapter->ring_feature[RING_F_FCOE]; txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : smp_processor_id(); while (txq >= f->indices) txq -= f->indices; return txq + f->offset; } #endif int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring, struct xdp_frame *xdpf) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? 
sinfo->nr_frags : 0; u16 i = 0, index = ring->next_to_use; struct ixgbe_tx_buffer *tx_head = &ring->tx_buffer_info[index]; struct ixgbe_tx_buffer *tx_buff = tx_head; union ixgbe_adv_tx_desc *tx_desc = IXGBE_TX_DESC(ring, index); u32 cmd_type, len = xdpf->len; void *data = xdpf->data; if (unlikely(ixgbe_desc_unused(ring) < 1 + nr_frags)) return IXGBE_XDP_CONSUMED; tx_head->bytecount = xdp_get_frame_len(xdpf); tx_head->gso_segs = 1; tx_head->xdpf = xdpf; tx_desc->read.olinfo_status = cpu_to_le32(tx_head->bytecount << IXGBE_ADVTXD_PAYLEN_SHIFT); for (;;) { dma_addr_t dma; dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); if (dma_mapping_error(ring->dev, dma)) goto unmap; dma_unmap_len_set(tx_buff, len, len); dma_unmap_addr_set(tx_buff, dma, dma); cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DCMD_IFCS | len; tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); tx_desc->read.buffer_addr = cpu_to_le64(dma); tx_buff->protocol = 0; if (++index == ring->count) index = 0; if (i == nr_frags) break; tx_buff = &ring->tx_buffer_info[index]; tx_desc = IXGBE_TX_DESC(ring, index); tx_desc->read.olinfo_status = 0; data = skb_frag_address(&sinfo->frags[i]); len = skb_frag_size(&sinfo->frags[i]); i++; } /* put descriptor type bits */ tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD); /* Avoid any potential race with xdp_xmit and cleanup */ smp_wmb(); tx_head->next_to_watch = tx_desc; ring->next_to_use = index; return IXGBE_XDP_TX; unmap: for (;;) { tx_buff = &ring->tx_buffer_info[index]; if (dma_unmap_len(tx_buff, len)) dma_unmap_page(ring->dev, dma_unmap_addr(tx_buff, dma), dma_unmap_len(tx_buff, len), DMA_TO_DEVICE); dma_unmap_len_set(tx_buff, len, 0); if (tx_buff == tx_head) break; if (!index) index += ring->count; index--; } return IXGBE_XDP_CONSUMED; } netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring) { struct ixgbe_tx_buffer *first; int tso; u32 tx_flags = 0; unsigned short f; u16 count = TXD_USE_COUNT(skb_headlen(skb)); struct ixgbe_ipsec_tx_data ipsec_tx = { 0 }; __be16 protocol = skb->protocol; u8 hdr_len = 0; /* * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, * + 1 desc for context descriptor, * otherwise try next time */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) count += TXD_USE_COUNT(skb_frag_size( &skb_shinfo(skb)->frags[f])); if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; } /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; first->skb = skb; first->bytecount = skb->len; first->gso_segs = 1; /* if we have a HW VLAN tag being added default to the HW one */ if (skb_vlan_tag_present(skb)) { tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; /* else if it is a SW VLAN check the next protocol and store the tag */ } else if (protocol == htons(ETH_P_8021Q)) { struct vlan_hdr *vhdr, _vhdr; vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); if (!vhdr) goto out_drop; tx_flags |= ntohs(vhdr->h_vlan_TCI) << IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; } protocol = vlan_get_protocol(skb); if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && adapter->ptp_clock) { if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, 
&adapter->state)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IXGBE_TX_FLAGS_TSTAMP; /* schedule check for Tx timestamp */ adapter->ptp_tx_skb = skb_get(skb); adapter->ptp_tx_start = jiffies; schedule_work(&adapter->ptp_tx_work); } else { adapter->tx_hwtstamp_skipped++; } } #ifdef CONFIG_PCI_IOV /* * Use the l2switch_enable flag - would be false if the DMA * Tx switch had been disabled. */ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) tx_flags |= IXGBE_TX_FLAGS_CC; #endif /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */ if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) || (skb->priority != TC_PRIO_CONTROL))) { tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; tx_flags |= (skb->priority & 0x7) << IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { struct vlan_ethhdr *vhdr; if (skb_cow_head(skb, 0)) goto out_drop; vhdr = skb_vlan_eth_hdr(skb); vhdr->h_vlan_TCI = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); } else { tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; } } /* record initial flags and protocol */ first->tx_flags = tx_flags; first->protocol = protocol; #ifdef IXGBE_FCOE /* setup tx offload for FCoE */ if ((protocol == htons(ETH_P_FCOE)) && (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { tso = ixgbe_fso(tx_ring, first, &hdr_len); if (tso < 0) goto out_drop; goto xmit_fcoe; } #endif /* IXGBE_FCOE */ #ifdef CONFIG_IXGBE_IPSEC if (xfrm_offload(skb) && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx)) goto out_drop; #endif tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx); if (tso < 0) goto out_drop; else if (!tso) ixgbe_tx_csum(tx_ring, first, &ipsec_tx); /* add the ATR filter if ATR is on */ if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) ixgbe_atr(tx_ring, first); #ifdef IXGBE_FCOE xmit_fcoe: #endif /* IXGBE_FCOE */ if (ixgbe_tx_map(tx_ring, first, hdr_len)) goto cleanup_tx_timestamp; return NETDEV_TX_OK; out_drop: dev_kfree_skb_any(first->skb); first->skb = NULL; cleanup_tx_timestamp: if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) { dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; cancel_work_sync(&adapter->ptp_tx_work); clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); } return NETDEV_TX_OK; } static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev, struct ixgbe_ring *ring) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *tx_ring; /* * The minimum packet size for olinfo paylen is 17 so pad the skb * in order to meet this minimum size requirement. */ if (skb_put_padto(skb, 17)) return NETDEV_TX_OK; tx_ring = ring ? 
ring : adapter->tx_ring[skb_get_queue_mapping(skb)]; if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state))) return NETDEV_TX_BUSY; return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); } static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { return __ixgbe_xmit_frame(skb, netdev, NULL); } /** * ixgbe_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure **/ static int ixgbe_set_mac(struct net_device *netdev, void *p) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(netdev, addr->sa_data); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); ixgbe_mac_set_default_filter(adapter); return 0; } static int ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u16 value; int rc; if (adapter->mii_bus) { int regnum = addr; if (devad != MDIO_DEVAD_NONE) return mdiobus_c45_read(adapter->mii_bus, prtad, devad, regnum); return mdiobus_read(adapter->mii_bus, prtad, regnum); } if (prtad != hw->phy.mdio.prtad) return -EINVAL; rc = hw->phy.ops.read_reg(hw, addr, devad, &value); if (!rc) rc = value; return rc; } static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, u16 addr, u16 value) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; if (adapter->mii_bus) { int regnum = addr; if (devad != MDIO_DEVAD_NONE) return mdiobus_c45_write(adapter->mii_bus, prtad, devad, regnum, value); return mdiobus_write(adapter->mii_bus, prtad, regnum, value); } if (prtad != hw->phy.mdio.prtad) return -EINVAL; return hw->phy.ops.write_reg(hw, addr, devad, value); } static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) { struct ixgbe_adapter *adapter = netdev_priv(netdev); switch (cmd) { case SIOCSHWTSTAMP: return ixgbe_ptp_set_ts_config(adapter, req); case SIOCGHWTSTAMP: return ixgbe_ptp_get_ts_config(adapter, req); case SIOCGMIIPHY: if (!adapter->hw.phy.ops.read_reg) return -EOPNOTSUPP; fallthrough; default: return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); } } /** * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding * netdev->dev_addrs * @dev: network interface device structure * * Returns non-zero on failure **/ static int ixgbe_add_sanmac_netdev(struct net_device *dev) { int err = 0; struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; if (is_valid_ether_addr(hw->mac.san_addr)) { rtnl_lock(); err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN); rtnl_unlock(); /* update SAN MAC vmdq pool selection */ hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); } return err; } /** * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding * netdev->dev_addrs * @dev: network interface device structure * * Returns non-zero on failure **/ static int ixgbe_del_sanmac_netdev(struct net_device *dev) { int err = 0; struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_mac_info *mac = &adapter->hw.mac; if (is_valid_ether_addr(mac->san_addr)) { rtnl_lock(); err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); rtnl_unlock(); } return err; } static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, struct ixgbe_ring *ring) { 
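	/* The loop below snapshots the ring's packets/bytes counters under the
	 * u64_stats seqcount (u64_stats_fetch_begin()/u64_stats_fetch_retry()),
	 * retrying until a consistent pair is read. This matters on 32-bit
	 * hosts where the two 64-bit counters cannot be read atomically with
	 * respect to the Tx clean path that updates them.
	 */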
u64 bytes, packets; unsigned int start; if (ring) { do { start = u64_stats_fetch_begin(&ring->syncp); packets = ring->stats.packets; bytes = ring->stats.bytes; } while (u64_stats_fetch_retry(&ring->syncp, start)); stats->tx_packets += packets; stats->tx_bytes += bytes; } } static void ixgbe_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int i; rcu_read_lock(); for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); u64 bytes, packets; unsigned int start; if (ring) { do { start = u64_stats_fetch_begin(&ring->syncp); packets = ring->stats.packets; bytes = ring->stats.bytes; } while (u64_stats_fetch_retry(&ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; } } for (i = 0; i < adapter->num_tx_queues; i++) { struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); ixgbe_get_ring_stats64(stats, ring); } for (i = 0; i < adapter->num_xdp_queues; i++) { struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); ixgbe_get_ring_stats64(stats, ring); } rcu_read_unlock(); /* following stats updated by ixgbe_watchdog_task() */ stats->multicast = netdev->stats.multicast; stats->rx_errors = netdev->stats.rx_errors; stats->rx_length_errors = netdev->stats.rx_length_errors; stats->rx_crc_errors = netdev->stats.rx_crc_errors; stats->rx_missed_errors = netdev->stats.rx_missed_errors; } static int ixgbe_ndo_get_vf_stats(struct net_device *netdev, int vf, struct ifla_vf_stats *vf_stats) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (vf < 0 || vf >= adapter->num_vfs) return -EINVAL; vf_stats->rx_packets = adapter->vfinfo[vf].vfstats.gprc; vf_stats->rx_bytes = adapter->vfinfo[vf].vfstats.gorc; vf_stats->tx_packets = adapter->vfinfo[vf].vfstats.gptc; vf_stats->tx_bytes = adapter->vfinfo[vf].vfstats.gotc; vf_stats->multicast = adapter->vfinfo[vf].vfstats.mprc; return 0; } #ifdef CONFIG_IXGBE_DCB /** * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. * @adapter: pointer to ixgbe_adapter * @tc: number of traffic classes currently enabled * * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm * 802.1Q priority maps to a packet buffer that exists. */ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) { struct ixgbe_hw *hw = &adapter->hw; u32 reg, rsave; int i; /* 82598 have a static priority to TC mapping that can not * be changed so no validation is needed. 
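 * For all other MAC types the RTRUP2TC register is read back below, each
 * 3-bit user-priority field is cleared when it maps to a traffic class
 * beyond the number currently enabled, and the register is rewritten only
 * if something actually changed.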
*/ if (hw->mac.type == ixgbe_mac_82598EB) return; reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); rsave = reg; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT); /* If up2tc is out of bounds default to zero */ if (up2tc > tc) reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT); } if (reg != rsave) IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); return; } /** * ixgbe_set_prio_tc_map - Configure netdev prio tc map * @adapter: Pointer to adapter struct * * Populate the netdev user priority to tc map */ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) { struct net_device *dev = adapter->netdev; struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; struct ieee_ets *ets = adapter->ixgbe_ieee_ets; u8 prio; for (prio = 0; prio < MAX_USER_PRIORITY; prio++) { u8 tc = 0; if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio); else if (ets) tc = ets->prio_tc[prio]; netdev_set_prio_tc_map(dev, prio, tc); } } #endif /* CONFIG_IXGBE_DCB */ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, struct netdev_nested_priv *priv) { struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data; struct ixgbe_fwd_adapter *accel; int pool; /* we only care about macvlans... */ if (!netif_is_macvlan(vdev)) return 0; /* that have hardware offload enabled... */ accel = macvlan_accel_priv(vdev); if (!accel) return 0; /* If we can relocate to a different bit do so */ pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); if (pool < adapter->num_rx_pools) { set_bit(pool, adapter->fwd_bitmask); accel->pool = pool; return 0; } /* if we cannot find a free pool then disable the offload */ netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n"); macvlan_release_l2fw_offload(vdev); /* unbind the queues and drop the subordinate channel config */ netdev_unbind_sb_channel(adapter->netdev, vdev); netdev_set_sb_channel(vdev, 0); kfree(accel); return 0; } static void ixgbe_defrag_macvlan_pools(struct net_device *dev) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct netdev_nested_priv priv = { .data = (void *)adapter, }; /* flush any stale bits out of the fwd bitmask */ bitmap_clear(adapter->fwd_bitmask, 1, 63); /* walk through upper devices reassigning pools */ netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool, &priv); } /** * ixgbe_setup_tc - configure net_device for multiple traffic classes * * @dev: net device to configure * @tc: number of traffic classes to enable */ int ixgbe_setup_tc(struct net_device *dev, u8 tc) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; /* Hardware supports up to 8 traffic classes */ if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) return -EINVAL; if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS) return -EINVAL; /* Hardware has to reinitialize queues and interrupts to * match packet buffer alignment. Unfortunately, the * hardware is not flexible enough to do this dynamically. 
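 * The sequence below therefore closes the interface (or resets the HW if
 * it is not running), tears down the interrupt scheme, applies the new
 * TC/DCB configuration, rebuilds the interrupt scheme, re-packs any
 * macvlan offload pools and finally reopens the netdev if it was up.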
*/ if (netif_running(dev)) ixgbe_close(dev); else ixgbe_reset(adapter); ixgbe_clear_interrupt_scheme(adapter); #ifdef CONFIG_IXGBE_DCB if (tc) { if (adapter->xdp_prog) { e_warn(probe, "DCB is not supported with XDP\n"); ixgbe_init_interrupt_scheme(adapter); if (netif_running(dev)) ixgbe_open(dev); return -EINVAL; } netdev_set_num_tc(dev, tc); ixgbe_set_prio_tc_map(adapter); adapter->hw_tcs = tc; adapter->flags |= IXGBE_FLAG_DCB_ENABLED; if (adapter->hw.mac.type == ixgbe_mac_82598EB) { adapter->last_lfc_mode = adapter->hw.fc.requested_mode; adapter->hw.fc.requested_mode = ixgbe_fc_none; } } else { netdev_reset_tc(dev); if (adapter->hw.mac.type == ixgbe_mac_82598EB) adapter->hw.fc.requested_mode = adapter->last_lfc_mode; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; adapter->hw_tcs = tc; adapter->temp_dcb_cfg.pfc_mode_enable = false; adapter->dcb_cfg.pfc_mode_enable = false; } ixgbe_validate_rtr(adapter, tc); #endif /* CONFIG_IXGBE_DCB */ ixgbe_init_interrupt_scheme(adapter); ixgbe_defrag_macvlan_pools(dev); if (netif_running(dev)) return ixgbe_open(dev); return 0; } static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, struct tc_cls_u32_offload *cls) { u32 hdl = cls->knode.handle; u32 uhtid = TC_U32_USERHTID(cls->knode.handle); u32 loc = cls->knode.handle & 0xfffff; int err = 0, i, j; struct ixgbe_jump_table *jump = NULL; if (loc > IXGBE_MAX_HW_ENTRIES) return -EINVAL; if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE)) return -EINVAL; /* Clear this filter in the link data it is associated with */ if (uhtid != 0x800) { jump = adapter->jump_tables[uhtid]; if (!jump) return -EINVAL; if (!test_bit(loc - 1, jump->child_loc_map)) return -EINVAL; clear_bit(loc - 1, jump->child_loc_map); } /* Check if the filter being deleted is a link */ for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { jump = adapter->jump_tables[i]; if (jump && jump->link_hdl == hdl) { /* Delete filters in the hardware in the child hash * table associated with this link */ for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) { if (!test_bit(j, jump->child_loc_map)) continue; spin_lock(&adapter->fdir_perfect_lock); err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, j + 1); spin_unlock(&adapter->fdir_perfect_lock); clear_bit(j, jump->child_loc_map); } /* Remove resources for this link */ kfree(jump->input); kfree(jump->mask); kfree(jump); adapter->jump_tables[i] = NULL; return err; } } spin_lock(&adapter->fdir_perfect_lock); err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc); spin_unlock(&adapter->fdir_perfect_lock); return err; } static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, struct tc_cls_u32_offload *cls) { u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); if (uhtid >= IXGBE_MAX_LINK_HANDLE) return -EINVAL; /* This ixgbe devices do not support hash tables at the moment * so abort when given hash tables. 
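 * In practice only single-bucket u32 nodes (divisor 0) can be offloaded;
 * a node created with a non-zero divisor would need hashed buckets that
 * the hardware jump tables cannot represent, so it is rejected below.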
*/ if (cls->hnode.divisor > 0) return -EINVAL; set_bit(uhtid - 1, &adapter->tables); return 0; } static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, struct tc_cls_u32_offload *cls) { u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); if (uhtid >= IXGBE_MAX_LINK_HANDLE) return -EINVAL; clear_bit(uhtid - 1, &adapter->tables); return 0; } #ifdef CONFIG_NET_CLS_ACT struct upper_walk_data { struct ixgbe_adapter *adapter; u64 action; int ifindex; u8 queue; }; static int get_macvlan_queue(struct net_device *upper, struct netdev_nested_priv *priv) { if (netif_is_macvlan(upper)) { struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper); struct ixgbe_adapter *adapter; struct upper_walk_data *data; int ifindex; data = (struct upper_walk_data *)priv->data; ifindex = data->ifindex; adapter = data->adapter; if (vadapter && upper->ifindex == ifindex) { data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; data->action = data->queue; return 1; } } return 0; } static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, u8 *queue, u64 *action) { struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; unsigned int num_vfs = adapter->num_vfs, vf; struct netdev_nested_priv priv; struct upper_walk_data data; struct net_device *upper; /* redirect to a SRIOV VF */ for (vf = 0; vf < num_vfs; ++vf) { upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev); if (upper->ifindex == ifindex) { *queue = vf * __ALIGN_MASK(1, ~vmdq->mask); *action = vf + 1; *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; return 0; } } /* redirect to a offloaded macvlan netdev */ data.adapter = adapter; data.ifindex = ifindex; data.action = 0; data.queue = 0; priv.data = (void *)&data; if (netdev_walk_all_upper_dev_rcu(adapter->netdev, get_macvlan_queue, &priv)) { *action = data.action; *queue = data.queue; return 0; } return -EINVAL; } static int parse_tc_actions(struct ixgbe_adapter *adapter, struct tcf_exts *exts, u64 *action, u8 *queue) { const struct tc_action *a; int i; if (!tcf_exts_has_actions(exts)) return -EINVAL; tcf_exts_for_each_action(i, a, exts) { /* Drop action */ if (is_tcf_gact_shot(a)) { *action = IXGBE_FDIR_DROP_QUEUE; *queue = IXGBE_FDIR_DROP_QUEUE; return 0; } /* Redirect to a VF or a offloaded macvlan */ if (is_tcf_mirred_egress_redirect(a)) { struct net_device *dev = tcf_mirred_dev(a); if (!dev) return -EINVAL; return handle_redirect_action(adapter, dev->ifindex, queue, action); } return -EINVAL; } return -EINVAL; } #else static int parse_tc_actions(struct ixgbe_adapter *adapter, struct tcf_exts *exts, u64 *action, u8 *queue) { return -EINVAL; } #endif /* CONFIG_NET_CLS_ACT */ static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, struct tc_cls_u32_offload *cls, struct ixgbe_mat_field *field_ptr, struct ixgbe_nexthdr *nexthdr) { int i, j, off; __be32 val, m; bool found_entry = false, found_jump_field = false; for (i = 0; i < cls->knode.sel->nkeys; i++) { off = cls->knode.sel->keys[i].off; val = cls->knode.sel->keys[i].val; m = cls->knode.sel->keys[i].mask; for (j = 0; field_ptr[j].val; j++) { if (field_ptr[j].off == off) { field_ptr[j].val(input, mask, (__force u32)val, (__force u32)m); input->filter.formatted.flow_type |= field_ptr[j].type; found_entry = true; break; } } if (nexthdr) { if (nexthdr->off == cls->knode.sel->keys[i].off && nexthdr->val == (__force u32)cls->knode.sel->keys[i].val && nexthdr->mask == (__force u32)cls->knode.sel->keys[i].mask) found_jump_field = true; else continue; } } if (nexthdr 
&& !found_jump_field) return -EINVAL; if (!found_entry) return 0; mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | IXGBE_ATR_L4TYPE_MASK; if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; return 0; } static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, struct tc_cls_u32_offload *cls) { __be16 protocol = cls->common.protocol; u32 loc = cls->knode.handle & 0xfffff; struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_mat_field *field_ptr; struct ixgbe_fdir_filter *input = NULL; union ixgbe_atr_input *mask = NULL; struct ixgbe_jump_table *jump = NULL; int i, err = -EINVAL; u8 queue; u32 uhtid, link_uhtid; uhtid = TC_U32_USERHTID(cls->knode.handle); link_uhtid = TC_U32_USERHTID(cls->knode.link_handle); /* At the moment cls_u32 jumps to network layer and skips past * L2 headers. The canonical method to match L2 frames is to use * negative values. However this is error prone at best but really * just broken because there is no way to "know" what sort of hdr * is in front of the network layer. Fix cls_u32 to support L2 * headers when needed. */ if (protocol != htons(ETH_P_IP)) return err; if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) { e_err(drv, "Location out of range\n"); return err; } /* cls u32 is a graph starting at root node 0x800. The driver tracks * links and also the fields used to advance the parser across each * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h * To add support for new nodes update ixgbe_model.h parse structures * this function _should_ be generic try not to hardcode values here. */ if (uhtid == 0x800) { field_ptr = (adapter->jump_tables[0])->mat; } else { if (uhtid >= IXGBE_MAX_LINK_HANDLE) return err; if (!adapter->jump_tables[uhtid]) return err; field_ptr = (adapter->jump_tables[uhtid])->mat; } if (!field_ptr) return err; /* At this point we know the field_ptr is valid and need to either * build cls_u32 link or attach filter. Because adding a link to * a handle that does not exist is invalid and the same for adding * rules to handles that don't exist. */ if (link_uhtid) { struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; if (link_uhtid >= IXGBE_MAX_LINK_HANDLE) return err; if (!test_bit(link_uhtid - 1, &adapter->tables)) return err; /* Multiple filters as links to the same hash table are not * supported. To add a new filter with the same next header * but different match/jump conditions, create a new hash table * and link to it. 
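 * Each accepted link allocates its own struct ixgbe_jump_table below,
 * holding the partially built filter template (input), the field mask,
 * the link handle, and a child_loc_map bitmap that tracks which filter
 * locations in the child table are already occupied.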
*/ if (adapter->jump_tables[link_uhtid] && (adapter->jump_tables[link_uhtid])->link_hdl) { e_err(drv, "Link filter exists for link: %x\n", link_uhtid); return err; } for (i = 0; nexthdr[i].jump; i++) { if (nexthdr[i].o != cls->knode.sel->offoff || nexthdr[i].s != cls->knode.sel->offshift || nexthdr[i].m != (__force u32)cls->knode.sel->offmask) return err; jump = kzalloc(sizeof(*jump), GFP_KERNEL); if (!jump) return -ENOMEM; input = kzalloc(sizeof(*input), GFP_KERNEL); if (!input) { err = -ENOMEM; goto free_jump; } mask = kzalloc(sizeof(*mask), GFP_KERNEL); if (!mask) { err = -ENOMEM; goto free_input; } jump->input = input; jump->mask = mask; jump->link_hdl = cls->knode.handle; err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, &nexthdr[i]); if (!err) { jump->mat = nexthdr[i].jump; adapter->jump_tables[link_uhtid] = jump; break; } else { kfree(mask); kfree(input); kfree(jump); } } return 0; } input = kzalloc(sizeof(*input), GFP_KERNEL); if (!input) return -ENOMEM; mask = kzalloc(sizeof(*mask), GFP_KERNEL); if (!mask) { err = -ENOMEM; goto free_input; } if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) { if ((adapter->jump_tables[uhtid])->input) memcpy(input, (adapter->jump_tables[uhtid])->input, sizeof(*input)); if ((adapter->jump_tables[uhtid])->mask) memcpy(mask, (adapter->jump_tables[uhtid])->mask, sizeof(*mask)); /* Lookup in all child hash tables if this location is already * filled with a filter */ for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { struct ixgbe_jump_table *link = adapter->jump_tables[i]; if (link && (test_bit(loc - 1, link->child_loc_map))) { e_err(drv, "Filter exists in location: %x\n", loc); err = -EINVAL; goto err_out; } } } err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL); if (err) goto err_out; err = parse_tc_actions(adapter, cls->knode.exts, &input->action, &queue); if (err < 0) goto err_out; input->sw_idx = loc; spin_lock(&adapter->fdir_perfect_lock); if (hlist_empty(&adapter->fdir_filter_list)) { memcpy(&adapter->fdir_mask, mask, sizeof(*mask)); err = ixgbe_fdir_set_input_mask_82599(hw, mask); if (err) goto err_out_w_lock; } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) { err = -EINVAL; goto err_out_w_lock; } ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask); err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, input->sw_idx, queue); if (err) goto err_out_w_lock; ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); spin_unlock(&adapter->fdir_perfect_lock); if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map); kfree(mask); return err; err_out_w_lock: spin_unlock(&adapter->fdir_perfect_lock); err_out: kfree(mask); free_input: kfree(input); free_jump: kfree(jump); return err; } static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter, struct tc_cls_u32_offload *cls_u32) { switch (cls_u32->command) { case TC_CLSU32_NEW_KNODE: case TC_CLSU32_REPLACE_KNODE: return ixgbe_configure_clsu32(adapter, cls_u32); case TC_CLSU32_DELETE_KNODE: return ixgbe_delete_clsu32(adapter, cls_u32); case TC_CLSU32_NEW_HNODE: case TC_CLSU32_REPLACE_HNODE: return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32); case TC_CLSU32_DELETE_HNODE: return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32); default: return -EOPNOTSUPP; } } static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { struct ixgbe_adapter *adapter = cb_priv; if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) return -EOPNOTSUPP; 
switch (type) { case TC_SETUP_CLSU32: return ixgbe_setup_tc_cls_u32(adapter, type_data); default: return -EOPNOTSUPP; } } static int ixgbe_setup_tc_mqprio(struct net_device *dev, struct tc_mqprio_qopt *mqprio) { mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; return ixgbe_setup_tc(dev, mqprio->num_tc); } static LIST_HEAD(ixgbe_block_cb_list); static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { struct ixgbe_adapter *adapter = netdev_priv(dev); switch (type) { case TC_SETUP_BLOCK: return flow_block_cb_setup_simple(type_data, &ixgbe_block_cb_list, ixgbe_setup_tc_block_cb, adapter, adapter, true); case TC_SETUP_QDISC_MQPRIO: return ixgbe_setup_tc_mqprio(dev, type_data); default: return -EOPNOTSUPP; } } #ifdef CONFIG_PCI_IOV void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; rtnl_lock(); ixgbe_setup_tc(netdev, adapter->hw_tcs); rtnl_unlock(); } #endif void ixgbe_do_reset(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) ixgbe_reinit_locked(adapter); else ixgbe_reset(adapter); } static netdev_features_t ixgbe_fix_features(struct net_device *netdev, netdev_features_t features) { struct ixgbe_adapter *adapter = netdev_priv(netdev); /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ if (!(features & NETIF_F_RXCSUM)) features &= ~NETIF_F_LRO; /* Turn off LRO if not RSC capable */ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) features &= ~NETIF_F_LRO; if (adapter->xdp_prog && (features & NETIF_F_LRO)) { e_dev_err("LRO is not supported with XDP\n"); features &= ~NETIF_F_LRO; } return features; } static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter) { int rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); /* go back to full RSS if we're not running SR-IOV */ if (!adapter->ring_feature[RING_F_VMDQ].offset) adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED); adapter->ring_feature[RING_F_RSS].limit = rss; adapter->ring_feature[RING_F_VMDQ].limit = 1; ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs); } static int ixgbe_set_features(struct net_device *netdev, netdev_features_t features) { struct ixgbe_adapter *adapter = netdev_priv(netdev); netdev_features_t changed = netdev->features ^ features; bool need_reset = false; /* Make sure RSC matches LRO, reset if change */ if (!(features & NETIF_F_LRO)) { if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) need_reset = true; adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { if (adapter->rx_itr_setting == 1 || adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; need_reset = true; } else if ((changed ^ features) & NETIF_F_LRO) { e_info(probe, "rx-usecs set too low, " "disabling RSC\n"); } } /* * Check if Flow Director n-tuple support or hw_tc support was * enabled or disabled. If the state changed, we need to reset. 
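 * Enabling NTUPLE or HW_TC switches the Flow Director into perfect-filter
 * mode and disables ATR signature (hash) filtering; dropping both only
 * re-enables ATR when SR-IOV is off, a single traffic class is in use,
 * RSS is active and the ATR sample rate is non-zero, as tested below.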
*/ if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) { /* turn off ATR, enable perfect filters and reset */ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) need_reset = true; adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; } else { /* turn off perfect filters, enable ATR and reset */ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) need_reset = true; adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; /* We cannot enable ATR if SR-IOV is enabled */ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED || /* We cannot enable ATR if we have 2 or more tcs */ (adapter->hw_tcs > 1) || /* We cannot enable ATR if RSS is disabled */ (adapter->ring_feature[RING_F_RSS].limit <= 1) || /* A sample rate of 0 indicates ATR disabled */ (!adapter->atr_sample_rate)) ; /* do nothing not supported */ else /* otherwise supported and set the flag */ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; } if (changed & NETIF_F_RXALL) need_reset = true; netdev->features = features; if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1) ixgbe_reset_l2fw_offload(adapter); else if (need_reset) ixgbe_do_reset(netdev); else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER)) ixgbe_set_rx_mode(netdev); return 1; } static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, u16 flags, struct netlink_ext_ack *extack) { /* guarantee we can provide a unique filter for the unicast address */ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { struct ixgbe_adapter *adapter = netdev_priv(dev); u16 pool = VMDQ_P(0); if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool)) return -ENOMEM; } return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); } /** * ixgbe_configure_bridge_mode - set various bridge modes * @adapter: the private structure * @mode: requested bridge mode * * Configure some settings require for various bridge modes. **/ static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter, __u16 mode) { struct ixgbe_hw *hw = &adapter->hw; unsigned int p, num_pools; u32 vmdctl; switch (mode) { case BRIDGE_MODE_VEPA: /* disable Tx loopback, rely on switch hairpin mode */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0); /* must enable Rx switching replication to allow multicast * packet reception on all VFs, and to enable source address * pruning. */ vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); vmdctl |= IXGBE_VT_CTL_REPLEN; IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); /* enable Rx source address pruning. Note, this requires * replication to be enabled or else it does nothing. */ num_pools = adapter->num_vfs + adapter->num_rx_pools; for (p = 0; p < num_pools; p++) { if (hw->mac.ops.set_source_address_pruning) hw->mac.ops.set_source_address_pruning(hw, true, p); } break; case BRIDGE_MODE_VEB: /* enable Tx loopback for internal VF/PF communication */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); /* disable Rx switching replication unless we have SR-IOV * virtual functions */ vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); if (!adapter->num_vfs) vmdctl &= ~IXGBE_VT_CTL_REPLEN; IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); /* disable Rx source address pruning, since we don't expect to * be receiving external loopback of our transmitted frames. 
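 * In short, VEB keeps local Tx loopback enabled (PFDTXGSWC.VT_LBEN) and
 * relies on the internal switch, while VEPA clears it, forces replication
 * (VT_CTL.REPLEN) on and prunes source addresses so frames hairpinned back
 * by the external switch are not received on the originating pool.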
*/ num_pools = adapter->num_vfs + adapter->num_rx_pools; for (p = 0; p < num_pools; p++) { if (hw->mac.ops.set_source_address_pruning) hw->mac.ops.set_source_address_pruning(hw, false, p); } break; default: return -EINVAL; } adapter->bridge_mode = mode; e_info(drv, "enabling bridge mode: %s\n", mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); return 0; } static int ixgbe_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags, struct netlink_ext_ack *extack) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct nlattr *attr, *br_spec; int rem; if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return -EOPNOTSUPP; br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); if (!br_spec) return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { int status; __u16 mode; if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; mode = nla_get_u16(attr); status = ixgbe_configure_bridge_mode(adapter, mode); if (status) return status; break; } return 0; } static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask, int nlflags) { struct ixgbe_adapter *adapter = netdev_priv(dev); if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return 0; return ndo_dflt_bridge_getlink(skb, pid, seq, dev, adapter->bridge_mode, 0, 0, nlflags, filter_mask, NULL); } static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) { struct ixgbe_adapter *adapter = netdev_priv(pdev); struct ixgbe_fwd_adapter *accel; int tcs = adapter->hw_tcs ? : 1; int pool, err; if (adapter->xdp_prog) { e_warn(probe, "L2FW offload is not supported with XDP\n"); return ERR_PTR(-EINVAL); } /* The hardware supported by ixgbe only filters on the destination MAC * address. In order to avoid issues we only support offloading modes * where the hardware can actually provide the functionality. */ if (!macvlan_supports_dest_filter(vdev)) return ERR_PTR(-EMEDIUMTYPE); /* We need to lock down the macvlan to be a single queue device so that * we can reuse the tc_to_txq field in the macvlan netdev to represent * the queue mapping to our netdev. */ if (netif_is_multiqueue(vdev)) return ERR_PTR(-ERANGE); pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); if (pool == adapter->num_rx_pools) { u16 used_pools = adapter->num_vfs + adapter->num_rx_pools; u16 reserved_pools; if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) || adapter->num_rx_pools > IXGBE_MAX_MACVLANS) return ERR_PTR(-EBUSY); /* Hardware has a limited number of available pools. Each VF, * and the PF require a pool. Check to ensure we don't * attempt to use more then the available number of pools. */ if (used_pools >= IXGBE_MAX_VF_FUNCTIONS) return ERR_PTR(-EBUSY); /* Enable VMDq flag so device will be set in VM mode */ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED; /* Try to reserve as many queues per pool as possible, * we start with the configurations that support 4 queues * per pools, followed by 2, and then by just 1 per pool. 
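 * Illustrative numbers (hypothetical): with no VFs and 4 pools in use,
 * used_pools = 4, so the first branch reserves min(32 - 4, 16 - 4) = 12
 * pools and the device stays in the 4-queues-per-pool layout; once 16 or
 * more pools exist the reservation falls back to the 2-queue and finally
 * the 1-queue-per-pool configurations.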
*/ if (used_pools < 32 && adapter->num_rx_pools < 16) reserved_pools = min_t(u16, 32 - used_pools, 16 - adapter->num_rx_pools); else if (adapter->num_rx_pools < 32) reserved_pools = min_t(u16, 64 - used_pools, 32 - adapter->num_rx_pools); else reserved_pools = 64 - used_pools; if (!reserved_pools) return ERR_PTR(-EBUSY); adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools; /* Force reinit of ring allocation with VMDQ enabled */ err = ixgbe_setup_tc(pdev, adapter->hw_tcs); if (err) return ERR_PTR(err); if (pool >= adapter->num_rx_pools) return ERR_PTR(-ENOMEM); } accel = kzalloc(sizeof(*accel), GFP_KERNEL); if (!accel) return ERR_PTR(-ENOMEM); set_bit(pool, adapter->fwd_bitmask); netdev_set_sb_channel(vdev, pool); accel->pool = pool; accel->netdev = vdev; if (!netif_running(pdev)) return accel; err = ixgbe_fwd_ring_up(adapter, accel); if (err) return ERR_PTR(err); return accel; } static void ixgbe_fwd_del(struct net_device *pdev, void *priv) { struct ixgbe_fwd_adapter *accel = priv; struct ixgbe_adapter *adapter = netdev_priv(pdev); unsigned int rxbase = accel->rx_base_queue; unsigned int i; /* delete unicast filter associated with offloaded interface */ ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr, VMDQ_P(accel->pool)); /* Allow remaining Rx packets to get flushed out of the * Rx FIFO before we drop the netdev for the ring. */ usleep_range(10000, 20000); for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i]; struct ixgbe_q_vector *qv = ring->q_vector; /* Make sure we aren't processing any packets and clear * netdev to shut down the ring. */ if (netif_running(adapter->netdev)) napi_synchronize(&qv->napi); ring->netdev = NULL; } /* unbind the queues and drop the subordinate channel config */ netdev_unbind_sb_channel(pdev, accel->netdev); netdev_set_sb_channel(accel->netdev, 0); clear_bit(accel->pool, adapter->fwd_bitmask); kfree(accel); } #define IXGBE_MAX_MAC_HDR_LEN 127 #define IXGBE_MAX_NETWORK_HDR_LEN 511 static netdev_features_t ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { unsigned int network_hdr_len, mac_hdr_len; /* Make certain the headers can be described by a context descriptor */ mac_hdr_len = skb_network_header(skb) - skb->data; if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN)) return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_TSO | NETIF_F_TSO6); network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN)) return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4 | NETIF_F_TSO | NETIF_F_TSO6); /* We can only support IPV4 TSO in tunnels if we can mangle the * inner IP ID field, so strip TSO if MANGLEID is not supported. * IPsec offoad sets skb->encapsulation but still can handle * the TSO, so it's the exception. 
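 * Concretely: for an encapsulated skb without NETIF_F_TSO_MANGLEID the
 * code below strips TSO, unless (with CONFIG_IXGBE_IPSEC) a secpath is
 * attached to the skb, in which case segmentation is still allowed.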
*/ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) { #ifdef CONFIG_IXGBE_IPSEC if (!secpath_exists(skb)) #endif features &= ~NETIF_F_TSO; } return features; } static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) { int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; struct ixgbe_adapter *adapter = netdev_priv(dev); struct bpf_prog *old_prog; bool need_reset; int num_queues; if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) return -EINVAL; if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) return -EINVAL; /* verify ixgbe ring attributes are sufficient for XDP */ for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; if (ring_is_rsc_enabled(ring)) return -EINVAL; if (frame_size > ixgbe_rx_bufsz(ring)) return -EINVAL; } /* if the number of cpus is much larger than the maximum of queues, * we should stop it and then return with ENOMEM like before. */ if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2) return -ENOMEM; old_prog = xchg(&adapter->xdp_prog, prog); need_reset = (!!prog != !!old_prog); /* If transitioning XDP modes reconfigure rings */ if (need_reset) { int err; if (!prog) /* Wait until ndo_xsk_wakeup completes. */ synchronize_rcu(); err = ixgbe_setup_tc(dev, adapter->hw_tcs); if (err) return -EINVAL; if (!prog) xdp_features_clear_redirect_target(dev); } else { for (i = 0; i < adapter->num_rx_queues; i++) { WRITE_ONCE(adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); } } if (old_prog) bpf_prog_put(old_prog); /* Kick start the NAPI context if there is an AF_XDP socket open * on that queue id. This so that receiving will start. */ if (need_reset && prog) { num_queues = min_t(int, adapter->num_rx_queues, adapter->num_xdp_queues); for (i = 0; i < num_queues; i++) if (adapter->xdp_ring[i]->xsk_pool) (void)ixgbe_xsk_wakeup(adapter->netdev, i, XDP_WAKEUP_RX); xdp_features_set_redirect_target(dev, true); } return 0; } static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct ixgbe_adapter *adapter = netdev_priv(dev); switch (xdp->command) { case XDP_SETUP_PROG: return ixgbe_xdp_setup(dev, xdp->prog); case XDP_SETUP_XSK_POOL: return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool, xdp->xsk.queue_id); default: return -EINVAL; } } void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) { /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. */ wmb(); writel(ring->next_to_use, ring->tail); } void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring) { if (static_branch_unlikely(&ixgbe_xdp_locking_key)) spin_lock(&ring->tx_lock); ixgbe_xdp_ring_update_tail(ring); if (static_branch_unlikely(&ixgbe_xdp_locking_key)) spin_unlock(&ring->tx_lock); } static int ixgbe_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_ring *ring; int nxmit = 0; int i; if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) return -ENETDOWN; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; /* During program transitions its possible adapter->xdp_prog is assigned * but ring has not been configured yet. In this case simply abort xmit. */ ring = adapter->xdp_prog ? 
ixgbe_determine_xdp_ring(adapter) : NULL; if (unlikely(!ring)) return -ENXIO; if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state))) return -ENXIO; if (static_branch_unlikely(&ixgbe_xdp_locking_key)) spin_lock(&ring->tx_lock); for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; int err; err = ixgbe_xmit_xdp_ring(ring, xdpf); if (err != IXGBE_XDP_TX) break; nxmit++; } if (unlikely(flags & XDP_XMIT_FLUSH)) ixgbe_xdp_ring_update_tail(ring); if (static_branch_unlikely(&ixgbe_xdp_locking_key)) spin_unlock(&ring->tx_lock); return nxmit; } static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, .ndo_start_xmit = ixgbe_xmit_frame, .ndo_set_rx_mode = ixgbe_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgbe_set_mac, .ndo_change_mtu = ixgbe_change_mtu, .ndo_tx_timeout = ixgbe_tx_timeout, .ndo_set_tx_maxrate = ixgbe_tx_maxrate, .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, .ndo_eth_ioctl = ixgbe_ioctl, .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, .ndo_set_vf_link_state = ixgbe_ndo_set_vf_link_state, .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_vf_stats = ixgbe_ndo_get_vf_stats, .ndo_get_stats64 = ixgbe_get_stats64, .ndo_setup_tc = __ixgbe_setup_tc, #ifdef IXGBE_FCOE .ndo_select_queue = ixgbe_select_queue, .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, .ndo_fcoe_enable = ixgbe_fcoe_enable, .ndo_fcoe_disable = ixgbe_fcoe_disable, .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo, #endif /* IXGBE_FCOE */ .ndo_set_features = ixgbe_set_features, .ndo_fix_features = ixgbe_fix_features, .ndo_fdb_add = ixgbe_ndo_fdb_add, .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, .ndo_dfwd_add_station = ixgbe_fwd_add, .ndo_dfwd_del_station = ixgbe_fwd_del, .ndo_features_check = ixgbe_features_check, .ndo_bpf = ixgbe_xdp, .ndo_xdp_xmit = ixgbe_xdp_xmit, .ndo_xsk_wakeup = ixgbe_xsk_wakeup, }; static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring) { unsigned long wait_delay, delay_interval; struct ixgbe_hw *hw = &adapter->hw; u8 reg_idx = tx_ring->reg_idx; int wait_loop; u32 txdctl; IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); /* delay mechanism from ixgbe_disable_tx */ delay_interval = ixgbe_get_completion_timeout(adapter) / 100; wait_loop = IXGBE_MAX_RX_DESC_POLL; wait_delay = delay_interval; while (wait_loop--) { usleep_range(wait_delay, wait_delay + 10); wait_delay += delay_interval * 2; txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); if (!(txdctl & IXGBE_TXDCTL_ENABLE)) return; } e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n"); } static void ixgbe_disable_txr(struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring) { set_bit(__IXGBE_TX_DISABLED, &tx_ring->state); ixgbe_disable_txr_hw(adapter, tx_ring); } static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring) { unsigned long wait_delay, delay_interval; struct ixgbe_hw *hw = &adapter->hw; u8 reg_idx = rx_ring->reg_idx; int wait_loop; u32 rxdctl; rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); rxdctl &= 
~IXGBE_RXDCTL_ENABLE; rxdctl |= IXGBE_RXDCTL_SWFLSH; /* write value back with RXDCTL.ENABLE bit cleared */ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); /* RXDCTL.EN may not change on 82598 if link is down, so skip it */ if (hw->mac.type == ixgbe_mac_82598EB && !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) return; /* delay mechanism from ixgbe_disable_rx */ delay_interval = ixgbe_get_completion_timeout(adapter) / 100; wait_loop = IXGBE_MAX_RX_DESC_POLL; wait_delay = delay_interval; while (wait_loop--) { usleep_range(wait_delay, wait_delay + 10); wait_delay += delay_interval * 2; rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); if (!(rxdctl & IXGBE_RXDCTL_ENABLE)) return; } e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n"); } static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring) { memset(&tx_ring->stats, 0, sizeof(tx_ring->stats)); memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats)); } static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring) { memset(&rx_ring->stats, 0, sizeof(rx_ring->stats)); memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); } /** * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings * @adapter: adapter structure * @ring: ring index * * This function disables a certain Rx/Tx/XDP Tx ring. The function * assumes that the netdev is running. **/ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring) { struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; rx_ring = adapter->rx_ring[ring]; tx_ring = adapter->tx_ring[ring]; xdp_ring = adapter->xdp_ring[ring]; ixgbe_disable_txr(adapter, tx_ring); if (xdp_ring) ixgbe_disable_txr(adapter, xdp_ring); ixgbe_disable_rxr_hw(adapter, rx_ring); if (xdp_ring) synchronize_rcu(); /* Rx/Tx/XDP Tx share the same napi context. */ napi_disable(&rx_ring->q_vector->napi); ixgbe_clean_tx_ring(tx_ring); if (xdp_ring) ixgbe_clean_tx_ring(xdp_ring); ixgbe_clean_rx_ring(rx_ring); ixgbe_reset_txr_stats(tx_ring); if (xdp_ring) ixgbe_reset_txr_stats(xdp_ring); ixgbe_reset_rxr_stats(rx_ring); } /** * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings * @adapter: adapter structure * @ring: ring index * * This function enables a certain Rx/Tx/XDP Tx ring. The function * assumes that the netdev is running. **/ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring) { struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; rx_ring = adapter->rx_ring[ring]; tx_ring = adapter->tx_ring[ring]; xdp_ring = adapter->xdp_ring[ring]; /* Rx/Tx/XDP Tx share the same napi context. */ napi_enable(&rx_ring->q_vector->napi); ixgbe_configure_tx_ring(adapter, tx_ring); if (xdp_ring) ixgbe_configure_tx_ring(adapter, xdp_ring); ixgbe_configure_rx_ring(adapter, rx_ring); clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state); if (xdp_ring) clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state); } /** * ixgbe_enumerate_functions - Get the number of ports this device has * @adapter: adapter structure * * This function enumerates the phsyical functions co-located on a single slot, * in order to determine how many ports a device has. This is most useful in * determining the required GT/s of PCIe bandwidth necessary for optimal * performance. **/ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) { struct pci_dev *entry, *pdev = adapter->pdev; int physfns = 0; /* Some cards can not use the generic count PCIe functions method, * because they are behind a parent switch, so we hardcode these with * the correct number of functions. 
*/ if (ixgbe_pcie_from_parent(&adapter->hw)) physfns = 4; list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) { /* don't count virtual functions */ if (entry->is_virtfn) continue; /* When the devices on the bus don't all match our device ID, * we can't reliably determine the correct number of * functions. This can occur if a function has been direct * attached to a virtual machine using VT-d, for example. In * this case, simply return -1 to indicate this. */ if ((entry->vendor != pdev->vendor) || (entry->device != pdev->device)) return -1; physfns++; } return physfns; } /** * ixgbe_wol_supported - Check whether device supports WoL * @adapter: the adapter private structure * @device_id: the device ID * @subdevice_id: the subsystem device ID * * This function is used by probe and ethtool to determine * which devices have WoL support * **/ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, u16 subdevice_id) { struct ixgbe_hw *hw = &adapter->hw; u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; /* WOL not supported on 82598 */ if (hw->mac.type == ixgbe_mac_82598EB) return false; /* check eeprom to see if WOL is enabled for X540 and newer */ if (hw->mac.type >= ixgbe_mac_X540) { if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && (hw->bus.func == 0))) return true; } /* WOL is determined based on device IDs for 82599 MACs */ switch (device_id) { case IXGBE_DEV_ID_82599_SFP: /* Only these subdevices could supports WOL */ switch (subdevice_id) { case IXGBE_SUBDEV_ID_82599_560FLR: case IXGBE_SUBDEV_ID_82599_LOM_SNAP6: case IXGBE_SUBDEV_ID_82599_SFP_WOL0: case IXGBE_SUBDEV_ID_82599_SFP_2OCP: /* only support first port */ if (hw->bus.func != 0) break; fallthrough; case IXGBE_SUBDEV_ID_82599_SP_560FLR: case IXGBE_SUBDEV_ID_82599_SFP: case IXGBE_SUBDEV_ID_82599_RNDC: case IXGBE_SUBDEV_ID_82599_ECNA_DP: case IXGBE_SUBDEV_ID_82599_SFP_1OCP: case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1: case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2: return true; } break; case IXGBE_DEV_ID_82599EN_SFP: /* Only these subdevices support WOL */ switch (subdevice_id) { case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1: return true; } break; case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: /* All except this subdevice support WOL */ if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) return true; break; case IXGBE_DEV_ID_82599_KX4: return true; default: break; } return false; } /** * ixgbe_set_fw_version - Set FW version * @adapter: the adapter private structure * * This function is used by probe and ethtool to determine the FW version to * format to display. The FW version is taken from the EEPROM/NVM. 
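 * Preference order: a valid OEM product version, otherwise the ETrack ID
 * combined with the Option ROM version, and as a last resort the ETrack
 * ID on its own.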
*/ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_nvm_version nvm_ver; ixgbe_get_oem_prod_version(hw, &nvm_ver); if (nvm_ver.oem_valid) { snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor, nvm_ver.oem_release); return; } ixgbe_get_etk_id(hw, &nvm_ver); ixgbe_get_orom_version(hw, &nvm_ver); if (nvm_ver.or_valid) { snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch); return; } /* Set ETrack ID format */ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), "0x%08x", nvm_ver.etk_id); } /** * ixgbe_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in ixgbe_pci_tbl * * Returns 0 on success, negative on failure * * ixgbe_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. **/ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct ixgbe_adapter *adapter = NULL; struct ixgbe_hw *hw; const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; unsigned int indices = MAX_TX_QUEUES; u8 part_str[IXGBE_PBANUM_LENGTH]; int i, err, expected_gts; bool disable_dev = false; #ifdef IXGBE_FCOE u16 device_caps; #endif u32 eec; /* Catch broken hardware that put the wrong VF device ID in * the PCIe SR-IOV capability. */ if (pdev->is_virtfn) { WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", pci_name(pdev), pdev->vendor, pdev->device); return -EINVAL; } err = pci_enable_device_mem(pdev); if (err) return err; err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_dma; } err = pci_request_mem_regions(pdev, ixgbe_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_selected_regions failed 0x%x\n", err); goto err_pci_reg; } pci_set_master(pdev); pci_save_state(pdev); if (ii->mac == ixgbe_mac_82598EB) { #ifdef CONFIG_IXGBE_DCB /* 8 TC w/ 4 queues per TC */ indices = 4 * MAX_TRAFFIC_CLASS; #else indices = IXGBE_MAX_RSS_INDICES; #endif } netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; } SET_NETDEV_DEV(netdev, &pdev->dev); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; hw = &adapter->hw; hw->back = adapter; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); adapter->io_addr = hw->hw_addr; if (!hw->hw_addr) { err = -EIO; goto err_ioremap; } netdev->netdev_ops = &ixgbe_netdev_ops; ixgbe_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); /* Setup hw api */ hw->mac.ops = *ii->mac_ops; hw->mac.type = ii->mac; hw->mvals = ii->mvals; if (ii->link_ops) hw->link.ops = *ii->link_ops; /* EEPROM */ hw->eeprom.ops = *ii->eeprom_ops; eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (ixgbe_removed(hw->hw_addr)) { err = -EIO; goto err_ioremap; } /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ if (!(eec & BIT(8))) hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; /* PHY */ hw->phy.ops = *ii->phy_ops; hw->phy.sfp_type = ixgbe_sfp_type_unknown; /* ixgbe_identify_phy_generic will set prtad and mmds properly */ 
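 /* Until PHY identification runs, the MDIO state is parked: no port
  * address yet (MDIO_PRTAD_NONE), clause-45 access with clause-22
  * emulation advertised, and register access routed through the
  * ixgbe_mdio_read()/ixgbe_mdio_write() wrappers hooked up just below.
  */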
hw->phy.mdio.prtad = MDIO_PRTAD_NONE; hw->phy.mdio.mmds = 0; hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; hw->phy.mdio.dev = netdev; hw->phy.mdio.mdio_read = ixgbe_mdio_read; hw->phy.mdio.mdio_write = ixgbe_mdio_write; /* setup the private structure */ err = ixgbe_sw_init(adapter, ii); if (err) goto err_sw_init; if (adapter->hw.mac.type == ixgbe_mac_82599EB) adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF; switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550; break; case ixgbe_mac_x550em_a: netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a; break; default: break; } /* Make sure the SWFW semaphore is in a valid state */ if (hw->mac.ops.init_swfw_sync) hw->mac.ops.init_swfw_sync(hw); /* Make it possible the adapter to be woken up via WOL */ switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); break; default: break; } /* * If there is a fan on this device and it has failed log the * failure. */ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); if (esdp & IXGBE_ESDP_SDP1) e_crit(probe, "Fan has stopped, replace the adapter\n"); } if (allow_unsupported_sfp) hw->allow_unsupported_sfp = allow_unsupported_sfp; /* reset_hw fills in the perm_addr as well */ hw->phy.reset_if_overtemp = true; err = hw->mac.ops.reset_hw(hw); hw->phy.reset_if_overtemp = false; ixgbe_set_eee_capable(adapter); if (err == IXGBE_ERR_SFP_NOT_PRESENT) { err = 0; } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n"); e_dev_err("Reload the driver after installing a supported module.\n"); goto err_sw_init; } else if (err) { e_dev_err("HW Init failed: %d\n", err); goto err_sw_init; } #ifdef CONFIG_PCI_IOV /* SR-IOV not supported on the 82598 */ if (adapter->hw.mac.type == ixgbe_mac_82598EB) goto skip_sriov; /* Mailbox */ ixgbe_init_mbx_params_pf(hw); hw->mbx.ops = ii->mbx_ops; pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT); ixgbe_enable_sriov(adapter, max_vfs); skip_sriov: #endif netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_HW_CSUM; #define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ NETIF_F_GSO_GRE_CSUM | \ NETIF_F_GSO_IPXIP4 | \ NETIF_F_GSO_IPXIP6 | \ NETIF_F_GSO_UDP_TUNNEL | \ NETIF_F_GSO_UDP_TUNNEL_CSUM) netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES; netdev->features |= NETIF_F_GSO_PARTIAL | IXGBE_GSO_PARTIAL_FEATURES; if (hw->mac.type >= ixgbe_mac_82599EB) netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4; #ifdef CONFIG_IXGBE_IPSEC #define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \ NETIF_F_HW_ESP_TX_CSUM | \ NETIF_F_GSO_ESP) if (adapter->ipsec) netdev->features |= IXGBE_ESP_FEATURES; #endif /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXALL | NETIF_F_HW_L2FW_DOFFLOAD; if (hw->mac.type >= ixgbe_mac_82599EB) netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; netdev->features |= NETIF_F_HIGHDMA; netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; netdev->hw_enc_features |= netdev->vlan_features; netdev->mpls_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_CSUM; netdev->mpls_features |= 
IXGBE_GSO_PARTIAL_FEATURES; /* set this bit last since it cannot be part of vlan_features */ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_XSK_ZEROCOPY; /* MTU range: 68 - 9710 */ netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); #ifdef CONFIG_IXGBE_DCB if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) netdev->dcbnl_ops = &ixgbe_dcbnl_ops; #endif #ifdef IXGBE_FCOE if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { unsigned int fcoe_l; if (hw->mac.ops.get_device_caps) { hw->mac.ops.get_device_caps(hw, &device_caps); if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; } fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus()); adapter->ring_feature[RING_F_FCOE].limit = fcoe_l; netdev->features |= NETIF_F_FSO | NETIF_F_FCOE_CRC; netdev->vlan_features |= NETIF_F_FSO | NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU; } #endif /* IXGBE_FCOE */ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) netdev->hw_features |= NETIF_F_LRO; if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) netdev->features |= NETIF_F_LRO; if (ixgbe_check_fw_error(adapter)) { err = -EIO; goto err_sw_init; } /* make sure the EEPROM is good */ if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { e_dev_err("The EEPROM Checksum Is Not Valid\n"); err = -EIO; goto err_sw_init; } eth_platform_get_mac_address(&adapter->pdev->dev, adapter->hw.mac.perm_addr); eth_hw_addr_set(netdev, hw->mac.perm_addr); if (!is_valid_ether_addr(netdev->dev_addr)) { e_dev_err("invalid MAC address\n"); err = -EIO; goto err_sw_init; } /* Set hw->mac.addr to permanent MAC address */ ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); ixgbe_mac_set_default_filter(adapter); timer_setup(&adapter->service_timer, ixgbe_service_timer, 0); if (ixgbe_removed(hw->hw_addr)) { err = -EIO; goto err_sw_init; } INIT_WORK(&adapter->service_task, ixgbe_service_task); set_bit(__IXGBE_SERVICE_INITED, &adapter->state); clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); err = ixgbe_init_interrupt_scheme(adapter); if (err) goto err_sw_init; for (i = 0; i < adapter->num_rx_queues; i++) u64_stats_init(&adapter->rx_ring[i]->syncp); for (i = 0; i < adapter->num_tx_queues; i++) u64_stats_init(&adapter->tx_ring[i]->syncp); for (i = 0; i < adapter->num_xdp_queues; i++) u64_stats_init(&adapter->xdp_ring[i]->syncp); /* WOL not supported for all devices */ adapter->wol = 0; hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device, pdev->subsystem_device); if (hw->wol_enabled) adapter->wol = IXGBE_WUFC_MAG; device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); /* save off EEPROM version number */ ixgbe_set_fw_version(adapter); /* pick up the PCI bus settings for reporting later */ if (ixgbe_pcie_from_parent(hw)) ixgbe_get_parent_bus_info(adapter); else hw->mac.ops.get_bus_info(hw); /* calculate the expected PCIe bandwidth required for optimal * performance. Note that some older parts will never have enough * bandwidth due to being older generation PCIe parts. We clamp these * parts to ensure no warning is displayed if it can't be fixed. 
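 * Rule of thumb used below: about 10 GT/s per 10GbE port, so for example
 * a dual-port adapter expects 2 * 10 = 20 GT/s, with 82598 capped at
 * 16 GT/s; a failed function count (negative value) skips the check.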
*/ switch (hw->mac.type) { case ixgbe_mac_82598EB: expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16); break; default: expected_gts = ixgbe_enumerate_functions(adapter) * 10; break; } /* don't check link if we failed to enumerate functions */ if (expected_gts > 0) ixgbe_check_minimum_link(adapter, expected_gts); err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str)); if (err) strscpy(part_str, "Unknown", sizeof(part_str)); if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", hw->mac.type, hw->phy.type, hw->phy.sfp_type, part_str); else e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", hw->mac.type, hw->phy.type, part_str); e_dev_info("%pM\n", netdev->dev_addr); /* reset the hardware with the new settings */ err = hw->mac.ops.start_hw(hw); if (err == IXGBE_ERR_EEPROM_VERSION) { /* We are running on a pre-production device, log a warning */ e_dev_warn("This device is a pre-production adapter/LOM. " "Please be aware there may be issues associated " "with your hardware. If you are experiencing " "problems please contact your Intel or hardware " "representative who provided you with this " "hardware.\n"); } strcpy(netdev->name, "eth%d"); pci_set_drvdata(pdev, adapter); err = register_netdev(netdev); if (err) goto err_register; /* power down the optics for 82599 SFP+ fiber */ if (hw->mac.ops.disable_tx_laser) hw->mac.ops.disable_tx_laser(hw); /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); #ifdef CONFIG_IXGBE_DCA if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IXGBE_FLAG_DCA_ENABLED; ixgbe_setup_dca(adapter); } #endif if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs); for (i = 0; i < adapter->num_vfs; i++) ixgbe_vf_configuration(pdev, (i | 0x10000000)); } /* firmware requires driver version to be 0xFFFFFFFF * since os does not support feature */ if (hw->mac.ops.set_fw_drv_ver) hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, sizeof(UTS_RELEASE) - 1, UTS_RELEASE); /* add san mac addr to netdev */ ixgbe_add_sanmac_netdev(netdev); e_dev_info("%s\n", ixgbe_default_device_descr); #ifdef CONFIG_IXGBE_HWMON if (ixgbe_sysfs_init(adapter)) e_err(probe, "failed to allocate sysfs resources\n"); #endif /* CONFIG_IXGBE_HWMON */ ixgbe_dbg_adapter_init(adapter); /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */ if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link) hw->mac.ops.setup_link(hw, IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, true); err = ixgbe_mii_bus_init(hw); if (err) goto err_netdev; return 0; err_netdev: unregister_netdev(netdev); err_register: ixgbe_release_hw_control(adapter); ixgbe_clear_interrupt_scheme(adapter); err_sw_init: ixgbe_disable_sriov(adapter); adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; iounmap(adapter->io_addr); kfree(adapter->jump_tables[0]); kfree(adapter->mac_table); kfree(adapter->rss_key); bitmap_free(adapter->af_xdp_zc_qps); err_ioremap: disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: pci_release_mem_regions(pdev); err_pci_reg: err_dma: if (!adapter || disable_dev) pci_disable_device(pdev); return err; } /** * ixgbe_remove - Device Removal Routine * @pdev: PCI device information struct * * ixgbe_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. 
This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev;
	bool disable_dev;
	int i;

	/* if !adapter then we already cleaned up in probe */
	if (!adapter)
		return;

	netdev = adapter->netdev;
	ixgbe_dbg_adapter_exit(adapter);

	set_bit(__IXGBE_REMOVING, &adapter->state);
	cancel_work_sync(&adapter->service_task);

	if (adapter->mii_bus)
		mdiobus_unregister(adapter->mii_bus);

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
				IXGBE_DCA_CTRL_DCA_DISABLE);
	}
#endif
#ifdef CONFIG_IXGBE_HWMON
	ixgbe_sysfs_exit(adapter);
#endif /* CONFIG_IXGBE_HWMON */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

#ifdef CONFIG_PCI_IOV
	ixgbe_disable_sriov(adapter);
#endif
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbe_stop_ipsec_offload(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
	ixgbe_release_hw_control(adapter);

#ifdef CONFIG_DCB
	kfree(adapter->ixgbe_ieee_pfc);
	kfree(adapter->ixgbe_ieee_ets);
#endif
	iounmap(adapter->io_addr);
	pci_release_mem_regions(pdev);

	e_dev_info("complete\n");

	for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
		if (adapter->jump_tables[i]) {
			kfree(adapter->jump_tables[i]->input);
			kfree(adapter->jump_tables[i]->mask);
		}
		kfree(adapter->jump_tables[i]);
	}

	kfree(adapter->mac_table);
	kfree(adapter->rss_key);
	bitmap_free(adapter->af_xdp_zc_qps);
	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
*/ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; #ifdef CONFIG_PCI_IOV struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *bdev, *vfdev; u32 dw0, dw1, dw2, dw3; int vf, pos; u16 req_id, pf_func; if (adapter->hw.mac.type == ixgbe_mac_82598EB || adapter->num_vfs == 0) goto skip_bad_vf_detection; bdev = pdev->bus->self; while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) bdev = bdev->bus->self; if (!bdev) goto skip_bad_vf_detection; pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR); if (!pos) goto skip_bad_vf_detection; dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG); dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4); dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8); dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12); if (ixgbe_removed(hw->hw_addr)) goto skip_bad_vf_detection; req_id = dw1 >> 16; /* On the 82599 if bit 7 of the requestor ID is set then it's a VF */ if (!(req_id & 0x0080)) goto skip_bad_vf_detection; pf_func = req_id & 0x01; if ((pf_func & 1) == (pdev->devfn & 1)) { unsigned int device_id; vf = (req_id & 0x7F) >> 1; e_dev_err("VF %d has caused a PCIe error\n", vf); e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: " "%8.8x\tdw3: %8.8x\n", dw0, dw1, dw2, dw3); switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: device_id = IXGBE_82599_VF_DEVICE_ID; break; case ixgbe_mac_X540: device_id = IXGBE_X540_VF_DEVICE_ID; break; case ixgbe_mac_X550: device_id = IXGBE_DEV_ID_X550_VF; break; case ixgbe_mac_X550EM_x: device_id = IXGBE_DEV_ID_X550EM_X_VF; break; case ixgbe_mac_x550em_a: device_id = IXGBE_DEV_ID_X550EM_A_VF; break; default: device_id = 0; break; } /* Find the pci device of the offending VF */ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL); while (vfdev) { if (vfdev->devfn == (req_id & 0xFF)) break; vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, vfdev); } /* * There's a slim chance the VF could have been hot plugged, * so if it is no longer present we don't need to issue the * VFLR. Just clean up the AER in that case. */ if (vfdev) { pcie_flr(vfdev); /* Free device reference count */ pci_dev_put(vfdev); } } /* * Even though the error may have occurred on the other port * we still need to increment the vf error reference count for * both ports because the I/O resume function will be called * for both of them. */ adapter->vferr_refcount++; return PCI_ERS_RESULT_RECOVERED; skip_bad_vf_detection: #endif /* CONFIG_PCI_IOV */ if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) return PCI_ERS_RESULT_DISCONNECT; if (!netif_device_present(netdev)) return PCI_ERS_RESULT_DISCONNECT; rtnl_lock(); netif_device_detach(netdev); if (netif_running(netdev)) ixgbe_close_suspend(adapter); if (state == pci_channel_io_perm_failure) { rtnl_unlock(); return PCI_ERS_RESULT_DISCONNECT; } if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) pci_disable_device(pdev); rtnl_unlock(); /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } /** * ixgbe_io_slot_reset - called after the pci bus has been reset. * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. 
*/ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); pci_ers_result_t result; if (pci_enable_device_mem(pdev)) { e_err(probe, "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { smp_mb__before_atomic(); clear_bit(__IXGBE_DISABLED, &adapter->state); adapter->hw.hw_addr = adapter->io_addr; pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); pci_wake_from_d3(pdev, false); ixgbe_reset(adapter); IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); result = PCI_ERS_RESULT_RECOVERED; } return result; } /** * ixgbe_io_resume - called when traffic can start flowing again. * @pdev: Pointer to PCI device * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. */ static void ixgbe_io_resume(struct pci_dev *pdev) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; #ifdef CONFIG_PCI_IOV if (adapter->vferr_refcount) { e_info(drv, "Resuming after VF err\n"); adapter->vferr_refcount--; return; } #endif rtnl_lock(); if (netif_running(netdev)) ixgbe_open(netdev); netif_device_attach(netdev); rtnl_unlock(); } static const struct pci_error_handlers ixgbe_err_handler = { .error_detected = ixgbe_io_error_detected, .slot_reset = ixgbe_io_slot_reset, .resume = ixgbe_io_resume, }; static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume); static struct pci_driver ixgbe_driver = { .name = ixgbe_driver_name, .id_table = ixgbe_pci_tbl, .probe = ixgbe_probe, .remove = ixgbe_remove, .driver.pm = &ixgbe_pm_ops, .shutdown = ixgbe_shutdown, .sriov_configure = ixgbe_pci_sriov_configure, .err_handler = &ixgbe_err_handler }; /** * ixgbe_init_module - Driver Registration Routine * * ixgbe_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. **/ static int __init ixgbe_init_module(void) { int ret; pr_info("%s\n", ixgbe_driver_string); pr_info("%s\n", ixgbe_copyright); ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name); if (!ixgbe_wq) { pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name); return -ENOMEM; } ixgbe_dbg_init(); ret = pci_register_driver(&ixgbe_driver); if (ret) { destroy_workqueue(ixgbe_wq); ixgbe_dbg_exit(); return ret; } #ifdef CONFIG_IXGBE_DCA dca_register_notify(&dca_notifier); #endif return 0; } module_init(ixgbe_init_module); /** * ixgbe_exit_module - Driver Exit Cleanup Routine * * ixgbe_exit_module is called just before the driver is removed * from memory. **/ static void __exit ixgbe_exit_module(void) { #ifdef CONFIG_IXGBE_DCA dca_unregister_notify(&dca_notifier); #endif pci_unregister_driver(&ixgbe_driver); ixgbe_dbg_exit(); if (ixgbe_wq) { destroy_workqueue(ixgbe_wq); ixgbe_wq = NULL; } } #ifdef CONFIG_IXGBE_DCA static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, void *p) { int ret_val; ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, __ixgbe_notify_dca); return ret_val ? NOTIFY_BAD : NOTIFY_DONE; } #endif /* CONFIG_IXGBE_DCA */ module_exit(ixgbe_exit_module); /* ixgbe_main.c */
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
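The ixgbe_probe() error handling above (err_netdev, err_register, err_sw_init, err_ioremap, ...) follows the usual kernel goto-cleanup ladder: each failure jumps to the label that releases only what has already been acquired, in reverse order of acquisition. The standalone sketch below illustrates that pattern only; fake_dev, example_probe and the malloc-backed "resources" are hypothetical stand-ins for illustration and are not part of the ixgbe driver or any kernel API.

/* Minimal user-space sketch of the goto-unwind pattern; all names here
 * are illustrative stand-ins, not driver code.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_dev {
	void *bar;	/* stands in for the ioremap()'d register window */
	void *tables;	/* stands in for mac_table/rss_key style allocations */
};

static int example_probe(struct fake_dev *dev)
{
	int err;

	dev->bar = malloc(64);
	if (!dev->bar) {
		err = -ENOMEM;
		goto err_ioremap;	/* nothing acquired yet */
	}

	dev->tables = malloc(256);
	if (!dev->tables) {
		err = -ENOMEM;
		goto err_sw_init;	/* undo only the earlier step */
	}

	return 0;			/* success: keep everything */

err_sw_init:
	free(dev->bar);			/* release in reverse order */
	dev->bar = NULL;
err_ioremap:
	return err;
}

int main(void)
{
	struct fake_dev dev = { 0 };
	int err = example_probe(&dev);

	printf("probe returned %d\n", err);
	free(dev.tables);		/* free(NULL) is a no-op */
	free(dev.bar);
	return err ? 1 : 0;
}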
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */ #include "ixgbe.h" #include <net/xfrm.h> #include <crypto/aead.h> #include <linux/if_bridge.h> #define IXGBE_IPSEC_KEY_BITS 160 static const char aes_gcm_name[] = "rfc4106(gcm(aes))"; static void ixgbe_ipsec_del_sa(struct xfrm_state *xs); /** * ixgbe_ipsec_set_tx_sa - set the Tx SA registers * @hw: hw specific details * @idx: register index to write * @key: key byte array * @salt: salt bytes **/ static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx, u32 key[], u32 salt) { u32 reg; int i; for (i = 0; i < 4; i++) IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), (__force u32)cpu_to_be32(key[3 - i])); IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, (__force u32)cpu_to_be32(salt)); IXGBE_WRITE_FLUSH(hw); reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX); reg &= IXGBE_RXTXIDX_IPS_EN; reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE; IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_ipsec_set_rx_item - set an Rx table item * @hw: hw specific details * @idx: register index to write * @tbl: table selector * * Trigger the device to store into a particular Rx table the * data that has already been loaded into the input register **/ static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx, enum ixgbe_ipsec_tbl_sel tbl) { u32 reg; reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX); reg &= IXGBE_RXTXIDX_IPS_EN; reg |= tbl << IXGBE_RXIDX_TBL_SHIFT | idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE; IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info * @hw: hw specific details * @idx: register index to write * @spi: security parameter index * @key: key byte array * @salt: salt bytes * @mode: rx decrypt control bits * @ip_idx: index into IP table for related IP address **/ static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi, u32 key[], u32 salt, u32 mode, u32 ip_idx) { int i; /* store the SPI (in bigendian) and IPidx */ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, (__force u32)cpu_to_le32((__force u32)spi)); IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx); IXGBE_WRITE_FLUSH(hw); ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl); /* store the key, salt, and mode */ for (i = 0; i < 4; i++) IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), (__force u32)cpu_to_be32(key[3 - i])); IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, (__force u32)cpu_to_be32(salt)); IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode); IXGBE_WRITE_FLUSH(hw); ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl); } /** * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info * @hw: hw specific details * @idx: register index to write * @addr: IP address byte array **/ static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[]) { int i; /* store the ip address */ for (i = 0; i < 4; i++) IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), (__force u32)cpu_to_le32((__force u32)addr[i])); IXGBE_WRITE_FLUSH(hw); ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl); } /** * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset * @adapter: board private structure **/ static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 buf[4] = {0, 0, 0, 0}; u16 idx; /* disable Rx and Tx SA lookup */ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0); IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0); /* scrub the tables - split the loops for the max of the IP table */ for (idx = 0; idx < 
IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) { ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0); ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0); ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf); } for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) { ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0); ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0); } } /** * ixgbe_ipsec_stop_data * @adapter: board private structure **/ static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; bool link = adapter->link_up; u32 t_rdy, r_rdy; u32 limit; u32 reg; /* halt data paths */ reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); reg |= IXGBE_SECTXCTRL_TX_DIS; IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg); reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); reg |= IXGBE_SECRXCTRL_RX_DIS; IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg); /* If both Tx and Rx are ready there are no packets * that we need to flush so the loopback configuration * below is not necessary. */ t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) & IXGBE_SECTXSTAT_SECTX_RDY; r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) & IXGBE_SECRXSTAT_SECRX_RDY; if (t_rdy && r_rdy) return; /* If the tx fifo doesn't have link, but still has data, * we can't clear the tx sec block. Set the MAC loopback * before block clear */ if (!link) { reg = IXGBE_READ_REG(hw, IXGBE_MACC); reg |= IXGBE_MACC_FLU; IXGBE_WRITE_REG(hw, IXGBE_MACC, reg); reg = IXGBE_READ_REG(hw, IXGBE_HLREG0); reg |= IXGBE_HLREG0_LPBK; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg); IXGBE_WRITE_FLUSH(hw); mdelay(3); } /* wait for the paths to empty */ limit = 20; do { mdelay(10); t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) & IXGBE_SECTXSTAT_SECTX_RDY; r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) & IXGBE_SECRXSTAT_SECRX_RDY; } while (!(t_rdy && r_rdy) && limit--); /* undo loopback if we played with it earlier */ if (!link) { reg = IXGBE_READ_REG(hw, IXGBE_MACC); reg &= ~IXGBE_MACC_FLU; IXGBE_WRITE_REG(hw, IXGBE_MACC, reg); reg = IXGBE_READ_REG(hw, IXGBE_HLREG0); reg &= ~IXGBE_HLREG0_LPBK; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg); IXGBE_WRITE_FLUSH(hw); } } /** * ixgbe_ipsec_stop_engine * @adapter: board private structure **/ static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 reg; ixgbe_ipsec_stop_data(adapter); /* disable Rx and Tx SA lookup */ IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0); IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0); /* disable the Rx and Tx engines and full packet store-n-forward */ reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); reg |= IXGBE_SECTXCTRL_SECTX_DIS; reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD; IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg); reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); reg |= IXGBE_SECRXCTRL_SECRX_DIS; IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg); /* restore the "tx security buffer almost full threshold" to 0x250 */ IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250); /* Set minimum IFG between packets back to the default 0x1 */ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); reg = (reg & 0xfffffff0) | 0x1; IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); /* final set for normal (no ipsec offload) processing */ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS); IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_ipsec_start_engine * @adapter: board private structure * * NOTE: this increases power consumption whether being used or not **/ static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 reg; ixgbe_ipsec_stop_data(adapter); /* Set minimum IFG 
between packets to 3 */ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); reg = (reg & 0xfffffff0) | 0x3; IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); /* Set "tx security buffer almost full threshold" to 0x15 so that the * almost full indication is generated only after buffer contains at * least an entire jumbo packet. */ reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF); reg = (reg & 0xfffffc00) | 0x15; IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg); /* restart the data paths by clearing the DISABLE bits */ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0); IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD); /* enable Rx and Tx SA lookup */ IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN); IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset * @adapter: board private structure * * Reload the HW tables from the SW tables after they've been bashed * by a chip reset. * * Any VF entries are removed from the SW and HW tables since either * (a) the VF also gets reset on PF reset and will ask again for the * offloads, or (b) the VF has been removed by a change in the num_vfs. **/ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { struct ixgbe_ipsec *ipsec = adapter->ipsec; struct ixgbe_hw *hw = &adapter->hw; int i; if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) return; /* clean up and restart the engine */ ixgbe_ipsec_stop_engine(adapter); ixgbe_ipsec_clear_hw_tables(adapter); ixgbe_ipsec_start_engine(adapter); /* reload the Rx and Tx keys */ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { struct rx_sa *r = &ipsec->rx_tbl[i]; struct tx_sa *t = &ipsec->tx_tbl[i]; if (r->used) { if (r->mode & IXGBE_RXTXMOD_VF) ixgbe_ipsec_del_sa(r->xs); else ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi, r->key, r->salt, r->mode, r->iptbl_ind); } if (t->used) { if (t->mode & IXGBE_RXTXMOD_VF) ixgbe_ipsec_del_sa(t->xs); else ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt); } } /* reload the IP addrs */ for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) { struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i]; if (ipsa->used) ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr); } } /** * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index * @ipsec: pointer to ipsec struct * @rxtable: true if we need to look in the Rx table * * Returns the first unused index in either the Rx or Tx SA table **/ static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable) { u32 i; if (rxtable) { if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT) return -ENOSPC; /* search rx sa table */ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { if (!ipsec->rx_tbl[i].used) return i; } } else { if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT) return -ENOSPC; /* search tx sa table */ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { if (!ipsec->tx_tbl[i].used) return i; } } return -ENOSPC; } /** * ixgbe_ipsec_find_rx_state - find the state that matches * @ipsec: pointer to ipsec struct * @daddr: inbound address to match * @proto: protocol to match * @spi: SPI to match * @ip4: true if using an ipv4 address * * Returns a pointer to the matching SA state information **/ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec, __be32 *daddr, u8 proto, __be32 spi, bool ip4) { struct rx_sa *rsa; struct xfrm_state *ret = NULL; rcu_read_lock(); hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, (__force u32)spi) { if (rsa->mode & IXGBE_RXTXMOD_VF) continue; if (spi == rsa->xs->id.spi && ((ip4 && 
*daddr == rsa->xs->id.daddr.a4) || (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6, sizeof(rsa->xs->id.daddr.a6)))) && proto == rsa->xs->id.proto) { ret = rsa->xs; xfrm_state_hold(ret); break; } } rcu_read_unlock(); return ret; } /** * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol * @xs: pointer to xfrm_state struct * @mykey: pointer to key array to populate * @mysalt: pointer to salt value to populate * * This copies the protocol keys and salt to our own data tables. The * 82599 family only supports the one algorithm. **/ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { struct net_device *dev = xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; if (!xs->aead) { netdev_err(dev, "Unsupported IPsec algorithm\n"); return -EINVAL; } if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) { netdev_err(dev, "IPsec offload requires %d bit authentication\n", IXGBE_IPSEC_AUTH_BITS); return -EINVAL; } key_data = &xs->aead->alg_key[0]; key_len = xs->aead->alg_key_len; alg_name = xs->aead->alg_name; if (strcmp(alg_name, aes_gcm_name)) { netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n", aes_gcm_name); return -EINVAL; } /* The key bytes come down in a bigendian array of bytes, so * we don't need to do any byteswapping. * 160 accounts for 16 byte key and 4 byte salt */ if (key_len == IXGBE_IPSEC_KEY_BITS) { *mysalt = ((u32 *)key_data)[4]; } else if (key_len != (IXGBE_IPSEC_KEY_BITS - (sizeof(*mysalt) * 8))) { netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n"); return -EINVAL; } else { netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n"); *mysalt = 0; } memcpy(mykey, key_data, 16); return 0; } /** * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters * @xs: pointer to transformer state struct **/ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) { struct net_device *dev = xs->xso.real_dev; struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; u32 mfval, manc, reg; int num_filters = 4; bool manc_ipv4; u32 bmcipval; int i, j; #define MANC_EN_IPV4_FILTER BIT(24) #define MFVAL_IPV4_FILTER_SHIFT 16 #define MFVAL_IPV6_FILTER_SHIFT 24 #define MIPAF_ARR(_m, _n) (IXGBE_MIPAF + ((_m) * 0x10) + ((_n) * 4)) #define IXGBE_BMCIP(_n) (0x5050 + ((_n) * 4)) #define IXGBE_BMCIPVAL 0x5060 #define BMCIP_V4 0x2 #define BMCIP_V6 0x3 #define BMCIP_MASK 0x3 manc = IXGBE_READ_REG(hw, IXGBE_MANC); manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER); mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL); bmcipval = IXGBE_READ_REG(hw, IXGBE_BMCIPVAL); if (xs->props.family == AF_INET) { /* are there any IPv4 filters to check? */ if (manc_ipv4) { /* the 4 ipv4 filters are all in MIPAF(3, i) */ for (i = 0; i < num_filters; i++) { if (!(mfval & BIT(MFVAL_IPV4_FILTER_SHIFT + i))) continue; reg = IXGBE_READ_REG(hw, MIPAF_ARR(3, i)); if (reg == (__force u32)xs->id.daddr.a4) return 1; } } if ((bmcipval & BMCIP_MASK) == BMCIP_V4) { reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(3)); if (reg == (__force u32)xs->id.daddr.a4) return 1; } } else { /* if there are ipv4 filters, they are in the last ipv6 slot */ if (manc_ipv4) num_filters = 3; for (i = 0; i < num_filters; i++) { if (!(mfval & BIT(MFVAL_IPV6_FILTER_SHIFT + i))) continue; for (j = 0; j < 4; j++) { reg = IXGBE_READ_REG(hw, MIPAF_ARR(i, j)); if (reg != (__force u32)xs->id.daddr.a6[j]) break; } if (j == 4) /* did we match all 4 words? 
*/ return 1; } if ((bmcipval & BMCIP_MASK) == BMCIP_V6) { for (j = 0; j < 4; j++) { reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(j)); if (reg != (__force u32)xs->id.daddr.a6[j]) break; } if (j == 4) /* did we match all 4 words? */ return 1; } } return 0; } /** * ixgbe_ipsec_add_sa - program device with a security association * @xs: pointer to transformer state struct * @extack: extack point to fill failure reason **/ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs, struct netlink_ext_ack *extack) { struct net_device *dev = xs->xso.real_dev; struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_ipsec *ipsec = adapter->ipsec; struct ixgbe_hw *hw = &adapter->hw; int checked, match, first; u16 sa_idx; int ret; int i; if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for ipsec offload"); return -EINVAL; } if (xs->props.mode != XFRM_MODE_TRANSPORT) { NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for ipsec offload"); return -EINVAL; } if (ixgbe_ipsec_check_mgmt_ip(xs)) { NL_SET_ERR_MSG_MOD(extack, "IPsec IP addr clash with mgmt filters"); return -EINVAL; } if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) { NL_SET_ERR_MSG_MOD(extack, "Unsupported ipsec offload type"); return -EINVAL; } if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { struct rx_sa rsa; if (xs->calg) { NL_SET_ERR_MSG_MOD(extack, "Compression offload not supported"); return -EINVAL; } /* find the first unused index */ ret = ixgbe_ipsec_find_empty_idx(ipsec, true); if (ret < 0) { NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx table!"); return ret; } sa_idx = (u16)ret; memset(&rsa, 0, sizeof(rsa)); rsa.used = true; rsa.xs = xs; if (rsa.xs->id.proto & IPPROTO_ESP) rsa.decrypt = xs->ealg || xs->aead; /* get the key and salt */ ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table"); return ret; } /* get ip for rx sa table */ if (xs->props.family == AF_INET6) memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16); else memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4); /* The HW does not have a 1:1 mapping from keys to IP addrs, so * check for a matching IP addr entry in the table. If the addr * already exists, use it; else find an unused slot and add the * addr. If one does not exist and there are no unused table * entries, fail the request. */ /* Find an existing match or first not used, and stop looking * after we've checked all we know we have. 
*/ checked = 0; match = -1; first = -1; for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT && (checked < ipsec->num_rx_sa || first < 0); i++) { if (ipsec->ip_tbl[i].used) { if (!memcmp(ipsec->ip_tbl[i].ipaddr, rsa.ipaddr, sizeof(rsa.ipaddr))) { match = i; break; } checked++; } else if (first < 0) { first = i; /* track the first empty seen */ } } if (ipsec->num_rx_sa == 0) first = 0; if (match >= 0) { /* addrs are the same, we should use this one */ rsa.iptbl_ind = match; ipsec->ip_tbl[match].ref_cnt++; } else if (first >= 0) { /* no matches, but here's an empty slot */ rsa.iptbl_ind = first; memcpy(ipsec->ip_tbl[first].ipaddr, rsa.ipaddr, sizeof(rsa.ipaddr)); ipsec->ip_tbl[first].ref_cnt = 1; ipsec->ip_tbl[first].used = true; ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr); } else { /* no match and no empty slot */ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx IP SA table"); memset(&rsa, 0, sizeof(rsa)); return -ENOSPC; } rsa.mode = IXGBE_RXMOD_VALID; if (rsa.xs->id.proto & IPPROTO_ESP) rsa.mode |= IXGBE_RXMOD_PROTO_ESP; if (rsa.decrypt) rsa.mode |= IXGBE_RXMOD_DECRYPT; if (rsa.xs->props.family == AF_INET6) rsa.mode |= IXGBE_RXMOD_IPV6; /* the preparations worked, so save the info */ memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa)); ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key, rsa.salt, rsa.mode, rsa.iptbl_ind); xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX; ipsec->num_rx_sa++; /* hash the new entry for faster search in Rx path */ hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist, (__force u32)rsa.xs->id.spi); } else { struct tx_sa tsa; if (adapter->num_vfs && adapter->bridge_mode != BRIDGE_MODE_VEPA) return -EOPNOTSUPP; /* find the first unused index */ ret = ixgbe_ipsec_find_empty_idx(ipsec, false); if (ret < 0) { NL_SET_ERR_MSG_MOD(extack, "No space for SA in Tx table"); return ret; } sa_idx = (u16)ret; memset(&tsa, 0, sizeof(tsa)); tsa.used = true; tsa.xs = xs; if (xs->id.proto & IPPROTO_ESP) tsa.encrypt = xs->ealg || xs->aead; ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table"); memset(&tsa, 0, sizeof(tsa)); return ret; } /* the preparations worked, so save the info */ memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa)); ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt); xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX; ipsec->num_tx_sa++; } /* enable the engine if not already warmed up */ if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) { ixgbe_ipsec_start_engine(adapter); adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED; } return 0; } /** * ixgbe_ipsec_del_sa - clear out this specific SA * @xs: pointer to transformer state struct **/ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs) { struct net_device *dev = xs->xso.real_dev; struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_ipsec *ipsec = adapter->ipsec; struct ixgbe_hw *hw = &adapter->hw; u32 zerobuf[4] = {0, 0, 0, 0}; u16 sa_idx; if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { struct rx_sa *rsa; u8 ipi; sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX; rsa = &ipsec->rx_tbl[sa_idx]; if (!rsa->used) { netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n", sa_idx, xs->xso.offload_handle); return; } ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0); hash_del_rcu(&rsa->hlist); /* if the IP table entry is referenced by only this SA, * i.e. 
ref_cnt is only 1, clear the IP table entry as well
		 */
		ipi = rsa->iptbl_ind;
		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
			ipsec->ip_tbl[ipi].ref_cnt--;

			if (!ipsec->ip_tbl[ipi].ref_cnt) {
				memset(&ipsec->ip_tbl[ipi], 0,
				       sizeof(struct rx_ip_sa));
				ixgbe_ipsec_set_rx_ip(hw, ipi,
						      (__force __be32 *)zerobuf);
			}
		}

		memset(rsa, 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}

	/* if there are no SAs left, stop the engine to save energy */
	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
		ixgbe_ipsec_stop_engine(adapter);
	}
}

/**
 * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};

/**
 * ixgbe_ipsec_vf_clear - clear the tables of data for a VF
 * @adapter: board private structure
 * @vf: VF id to be removed
 **/
void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	int i;

	if (!ipsec)
		return;

	/* search rx sa table */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_rx_sa; i++) {
		if (!ipsec->rx_tbl[i].used)
			continue;
		if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
		    ipsec->rx_tbl[i].vf == vf)
			ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs);
	}

	/* search tx sa table */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_tx_sa; i++) {
		if (!ipsec->tx_tbl[i].used)
			continue;
		if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
		    ipsec->tx_tbl[i].vf == vf)
			ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs);
	}
}

/**
 * ixgbe_ipsec_vf_add_sa - translate VF request to SA add
 * @adapter: board private structure
 * @msgbuf: The message buffer
 * @vf: the VF index
 *
 * Make up a new xs and algorithm info from the data sent by the VF.
 * We only need to sketch in just enough to set up the HW offload.
 * Put the resulting offload_handle into the return message to the VF.
 *
 * Returns 0 or error value
 **/
int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_algo_desc *algo;
	struct sa_mbx_msg *sam;
	struct xfrm_state *xs;
	size_t aead_len;
	u16 sa_idx;
	u32 pfsa;
	int err;

	sam = (struct sa_mbx_msg *)(&msgbuf[1]);
	if (!adapter->vfinfo[vf].trusted ||
	    !(adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)) {
		e_warn(drv, "VF %d attempted to add an IPsec SA\n", vf);
		err = -EACCES;
		goto err_out;
	}

	/* Tx IPsec offload doesn't seem to work on this
	 * device, so block these requests for now.
*/ if (sam->dir != XFRM_DEV_OFFLOAD_IN) { err = -EOPNOTSUPP; goto err_out; } xs = kzalloc(sizeof(*xs), GFP_KERNEL); if (unlikely(!xs)) { err = -ENOMEM; goto err_out; } xs->xso.dir = sam->dir; xs->id.spi = sam->spi; xs->id.proto = sam->proto; xs->props.family = sam->family; if (xs->props.family == AF_INET6) memcpy(&xs->id.daddr.a6, sam->addr, sizeof(xs->id.daddr.a6)); else memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4)); xs->xso.dev = adapter->netdev; algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1); if (unlikely(!algo)) { err = -ENOENT; goto err_xs; } aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8; xs->aead = kzalloc(aead_len, GFP_KERNEL); if (unlikely(!xs->aead)) { err = -ENOMEM; goto err_xs; } xs->props.ealgo = algo->desc.sadb_alg_id; xs->geniv = algo->uinfo.aead.geniv; xs->aead->alg_icv_len = IXGBE_IPSEC_AUTH_BITS; xs->aead->alg_key_len = IXGBE_IPSEC_KEY_BITS; memcpy(xs->aead->alg_key, sam->key, sizeof(sam->key)); memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name)); /* set up the HW offload */ err = ixgbe_ipsec_add_sa(xs, NULL); if (err) goto err_aead; pfsa = xs->xso.offload_handle; if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) { sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX; ipsec->rx_tbl[sa_idx].vf = vf; ipsec->rx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF; } else { sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX; ipsec->tx_tbl[sa_idx].vf = vf; ipsec->tx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF; } msgbuf[1] = xs->xso.offload_handle; return 0; err_aead: kfree_sensitive(xs->aead); err_xs: kfree_sensitive(xs); err_out: msgbuf[1] = err; return err; } /** * ixgbe_ipsec_vf_del_sa - translate VF request to SA delete * @adapter: board private structure * @msgbuf: The message buffer * @vf: the VF index * * Given the offload_handle sent by the VF, look for the related SA table * entry and use its xs field to call for a delete of the SA. * * Note: We silently ignore requests to delete entries that are already * set to unused because when a VF is set to "DOWN", the PF first * gets a reset and clears all the VF's entries; then the VF's * XFRM stack sends individual deletes for each entry, which the * reset already removed. In the future it might be good to try to * optimize this so not so many unnecessary delete messages are sent. 
* * Returns 0 or error value **/ int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { struct ixgbe_ipsec *ipsec = adapter->ipsec; struct xfrm_state *xs; u32 pfsa = msgbuf[1]; u16 sa_idx; if (!adapter->vfinfo[vf].trusted) { e_err(drv, "vf %d attempted to delete an SA\n", vf); return -EPERM; } if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) { struct rx_sa *rsa; sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX; if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) { e_err(drv, "vf %d SA index %d out of range\n", vf, sa_idx); return -EINVAL; } rsa = &ipsec->rx_tbl[sa_idx]; if (!rsa->used) return 0; if (!(rsa->mode & IXGBE_RXTXMOD_VF) || rsa->vf != vf) { e_err(drv, "vf %d bad Rx SA index %d\n", vf, sa_idx); return -ENOENT; } xs = ipsec->rx_tbl[sa_idx].xs; } else { struct tx_sa *tsa; sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX; if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) { e_err(drv, "vf %d SA index %d out of range\n", vf, sa_idx); return -EINVAL; } tsa = &ipsec->tx_tbl[sa_idx]; if (!tsa->used) return 0; if (!(tsa->mode & IXGBE_RXTXMOD_VF) || tsa->vf != vf) { e_err(drv, "vf %d bad Tx SA index %d\n", vf, sa_idx); return -ENOENT; } xs = ipsec->tx_tbl[sa_idx].xs; } ixgbe_ipsec_del_sa(xs); /* remove the xs that was made-up in the add request */ kfree_sensitive(xs); return 0; } /** * ixgbe_ipsec_tx - setup Tx flags for ipsec offload * @tx_ring: outgoing context * @first: current data packet * @itd: ipsec Tx data for later use in building context descriptor **/ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, struct ixgbe_ipsec_tx_data *itd) { struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev); struct ixgbe_ipsec *ipsec = adapter->ipsec; struct xfrm_state *xs; struct sec_path *sp; struct tx_sa *tsa; sp = skb_sec_path(first->skb); if (unlikely(!sp->len)) { netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n", __func__, sp->len); return 0; } xs = xfrm_input_state(first->skb); if (unlikely(!xs)) { netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n", __func__, xs); return 0; } itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) { netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n", __func__, itd->sa_idx, xs->xso.offload_handle); return 0; } tsa = &ipsec->tx_tbl[itd->sa_idx]; if (unlikely(!tsa->used)) { netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n", __func__, itd->sa_idx); return 0; } first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC; if (xs->id.proto == IPPROTO_ESP) { itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP | IXGBE_ADVTXD_TUCMD_L4T_TCP; if (first->protocol == htons(ETH_P_IP)) itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4; /* The actual trailer length is authlen (16 bytes) plus * 2 bytes for the proto and the padlen values, plus * padlen bytes of padding. This ends up not the same * as the static value found in xs->props.trailer_len (21). * * ... but if we're doing GSO, don't bother as the stack * doesn't add a trailer for those. */ if (!skb_is_gso(first->skb)) { /* The "correct" way to get the auth length would be * to use * authlen = crypto_aead_authsize(xs->data); * but since we know we only have one size to worry * about * we can let the compiler use the constant * and save us a few CPU cycles. 
*/ const int authlen = IXGBE_IPSEC_AUTH_BITS / 8; struct sk_buff *skb = first->skb; u8 padlen; int ret; ret = skb_copy_bits(skb, skb->len - (authlen + 2), &padlen, 1); if (unlikely(ret)) return 0; itd->trailer_len = authlen + 2 + padlen; } } if (tsa->encrypt) itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN; return 1; } /** * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor * @rx_ring: receiving ring * @rx_desc: receive data descriptor * @skb: current data packet * * Determine if there was an ipsec encapsulation noticed, and if so set up * the resulting status for later in the receive stack. **/ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev); __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; __le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH | IXGBE_RXDADV_PKTTYPE_IPSEC_ESP); struct ixgbe_ipsec *ipsec = adapter->ipsec; struct xfrm_offload *xo = NULL; struct xfrm_state *xs = NULL; struct ipv6hdr *ip6 = NULL; struct iphdr *ip4 = NULL; struct sec_path *sp; void *daddr; __be32 spi; u8 *c_hdr; u8 proto; /* Find the ip and crypto headers in the data. * We can assume no vlan header in the way, b/c the * hw won't recognize the IPsec packet and anyway the * currently vlan device doesn't support xfrm offload. */ if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) { ip4 = (struct iphdr *)(skb->data + ETH_HLEN); daddr = &ip4->daddr; c_hdr = (u8 *)ip4 + ip4->ihl * 4; } else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) { ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN); daddr = &ip6->daddr; c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr); } else { return; } switch (pkt_info & ipsec_pkt_types) { case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH): spi = ((struct ip_auth_hdr *)c_hdr)->spi; proto = IPPROTO_AH; break; case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP): spi = ((struct ip_esp_hdr *)c_hdr)->spi; proto = IPPROTO_ESP; break; default: return; } xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4); if (unlikely(!xs)) return; sp = secpath_set(skb); if (unlikely(!sp)) return; sp->xvec[sp->len++] = xs; sp->olen++; xo = xfrm_offload(skb); xo->flags = CRYPTO_DONE; xo->status = CRYPTO_SUCCESS; adapter->rx_ipsec++; } /** * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation * @adapter: board private structure **/ void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_ipsec *ipsec; u32 t_dis, r_dis; size_t size; if (hw->mac.type == ixgbe_mac_82598EB) return; /* If there is no support for either Tx or Rx offload * we should not be advertising support for IPsec. 
*/ t_dis = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) & IXGBE_SECTXSTAT_SECTX_OFF_DIS; r_dis = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) & IXGBE_SECRXSTAT_SECRX_OFF_DIS; if (t_dis || r_dis) return; ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL); if (!ipsec) goto err1; hash_init(ipsec->rx_sa_list); size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; ipsec->rx_tbl = kzalloc(size, GFP_KERNEL); if (!ipsec->rx_tbl) goto err2; size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; ipsec->tx_tbl = kzalloc(size, GFP_KERNEL); if (!ipsec->tx_tbl) goto err2; size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT; ipsec->ip_tbl = kzalloc(size, GFP_KERNEL); if (!ipsec->ip_tbl) goto err2; ipsec->num_rx_sa = 0; ipsec->num_tx_sa = 0; adapter->ipsec = ipsec; ixgbe_ipsec_stop_engine(adapter); ixgbe_ipsec_clear_hw_tables(adapter); adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops; return; err2: kfree(ipsec->ip_tbl); kfree(ipsec->rx_tbl); kfree(ipsec->tx_tbl); kfree(ipsec); err1: netdev_err(adapter->netdev, "Unable to allocate memory for SA tables"); } /** * ixgbe_stop_ipsec_offload - tear down the ipsec offload * @adapter: board private structure **/ void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { struct ixgbe_ipsec *ipsec = adapter->ipsec; adapter->ipsec = NULL; if (ipsec) { kfree(ipsec->ip_tbl); kfree(ipsec->rx_tbl); kfree(ipsec->tx_tbl); kfree(ipsec); } }
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
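ixgbe_ipsec_parse_proto_keys() above accepts rfc4106(gcm(aes)) key material of IXGBE_IPSEC_KEY_BITS (160) bits and treats it as a 128-bit AES key followed by a 32-bit salt, falling back to a zero salt when only 128 bits are supplied. The sketch below mirrors that split as standalone user-space C under those stated assumptions; split_key_salt, EXAMPLE_KEY_BITS and the sample buffer are hypothetical illustrations, not driver or kernel code.

/* Illustrative stand-alone sketch of the 160-bit key-material split used
 * by the SA setup above; not part of the ixgbe driver.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXAMPLE_KEY_BITS 160	/* mirrors IXGBE_IPSEC_KEY_BITS */

static int split_key_salt(const uint8_t *key_data, int key_len_bits,
			  uint8_t out_key[16], uint32_t *out_salt)
{
	/* only a 128-bit AES key, with or without a trailing 32-bit salt */
	if (key_len_bits != EXAMPLE_KEY_BITS &&
	    key_len_bits != EXAMPLE_KEY_BITS - 32)
		return -1;

	memcpy(out_key, key_data, 16);		/* first 16 bytes: AES key */

	if (key_len_bits == EXAMPLE_KEY_BITS)
		memcpy(out_salt, key_data + 16, 4);	/* last 4 bytes: salt */
	else
		*out_salt = 0;			/* no salt supplied */

	return 0;
}

int main(void)
{
	uint8_t blob[20] = { [16] = 0xde, 0xad, 0xbe, 0xef };
	uint8_t key[16];
	uint32_t salt = 0;

	if (!split_key_salt(blob, 160, key, &salt))
		printf("salt bytes: %02x %02x %02x %02x\n",
		       blob[16], blob[17], blob[18], blob[19]);
	return 0;
}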
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ /* ethtool support for ixgbe */ #include <linux/interrupt.h> #include <linux/types.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/vmalloc.h> #include <linux/highmem.h> #include <linux/uaccess.h> #include "ixgbe.h" #include "ixgbe_phy.h" enum {NETDEV_STATS, IXGBE_STATS}; struct ixgbe_stats { char stat_string[ETH_GSTRING_LEN]; int type; int sizeof_stat; int stat_offset; }; #define IXGBE_STAT(m) IXGBE_STATS, \ sizeof(((struct ixgbe_adapter *)0)->m), \ offsetof(struct ixgbe_adapter, m) #define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ sizeof(((struct rtnl_link_stats64 *)0)->m), \ offsetof(struct rtnl_link_stats64, m) static const struct ixgbe_stats ixgbe_gstrings_stats[] = { {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)}, {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, {"tx_bytes_nic", IXGBE_STAT(stats.gotc)}, {"lsc_int", IXGBE_STAT(lsc_int)}, {"tx_busy", IXGBE_STAT(tx_busy)}, {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)}, {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)}, {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)}, {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)}, {"multicast", IXGBE_NETDEV_STAT(multicast)}, {"broadcast", IXGBE_STAT(stats.bprc)}, {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, {"collisions", IXGBE_NETDEV_STAT(collisions)}, {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)}, {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)}, {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)}, {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, {"fdir_overflow", IXGBE_STAT(fdir_overflow)}, {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)}, {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)}, {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, {"tx_restart_queue", IXGBE_STAT(restart_queue)}, {"rx_length_errors", IXGBE_STAT(stats.rlec)}, {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, {"alloc_rx_page", IXGBE_STAT(alloc_rx_page)}, {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)}, {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)}, {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)}, {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)}, {"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)}, {"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)}, {"rx_hwtstamp_cleared", 
IXGBE_STAT(rx_hwtstamp_cleared)}, {"tx_ipsec", IXGBE_STAT(tx_ipsec)}, {"rx_ipsec", IXGBE_STAT(rx_ipsec)}, #ifdef IXGBE_FCOE {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)}, {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)}, {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)}, {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, #endif /* IXGBE_FCOE */ }; /* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so * we set the num_rx_queues to evaluate to num_tx_queues. This is * used because we do not have a good way to get the max number of * rx queues with CONFIG_RPS disabled. */ #define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues #define IXGBE_QUEUE_STATS_LEN ( \ (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \ (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) #define IXGBE_PB_STATS_LEN ( \ (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ / sizeof(u64)) #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ IXGBE_PB_STATS_LEN + \ IXGBE_QUEUE_STATS_LEN) static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Eeprom test (offline)", "Interrupt test (offline)", "Loopback test (offline)", "Link test (on/offline)" }; #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { #define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0) "legacy-rx", #define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1) "vf-ipsec", #define IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF BIT(2) "mdd-disable-vf", }; #define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings) #define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane) static void ixgbe_set_supported_10gtypes(struct ixgbe_hw *hw, struct ethtool_link_ksettings *cmd) { if (!ixgbe_isbackplane(hw->phy.media_type)) { ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); return; } switch (hw->device_id) { case IXGBE_DEV_ID_82598: case IXGBE_DEV_ID_82599_KX4: case IXGBE_DEV_ID_82599_KX4_MEZZ: case IXGBE_DEV_ID_X550EM_X_KX4: ethtool_link_ksettings_add_link_mode (cmd, supported, 10000baseKX4_Full); break; case IXGBE_DEV_ID_82598_BX: case IXGBE_DEV_ID_82599_KR: case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_X_XFI: ethtool_link_ksettings_add_link_mode (cmd, supported, 10000baseKR_Full); break; default: ethtool_link_ksettings_add_link_mode (cmd, supported, 10000baseKX4_Full); ethtool_link_ksettings_add_link_mode (cmd, supported, 10000baseKR_Full); break; } } static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw, struct ethtool_link_ksettings *cmd) { if (!ixgbe_isbackplane(hw->phy.media_type)) { ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); return; } switch (hw->device_id) { case IXGBE_DEV_ID_82598: case IXGBE_DEV_ID_82599_KX4: case IXGBE_DEV_ID_82599_KX4_MEZZ: case IXGBE_DEV_ID_X550EM_X_KX4: ethtool_link_ksettings_add_link_mode (cmd, advertising, 10000baseKX4_Full); break; case IXGBE_DEV_ID_82598_BX: case IXGBE_DEV_ID_82599_KR: case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_X_XFI: ethtool_link_ksettings_add_link_mode (cmd, advertising, 10000baseKR_Full); break; default: 
ethtool_link_ksettings_add_link_mode (cmd, advertising, 10000baseKX4_Full); ethtool_link_ksettings_add_link_mode (cmd, advertising, 10000baseKR_Full); break; } } static int ixgbe_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; ixgbe_link_speed supported_link; bool autoneg = false; ethtool_link_ksettings_zero_link_mode(cmd, supported); ethtool_link_ksettings_zero_link_mode(cmd, advertising); hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); /* set the supported link speeds */ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) { ixgbe_set_supported_10gtypes(hw, cmd); ixgbe_set_advertising_10gtypes(hw, cmd); } if (supported_link & IXGBE_LINK_SPEED_5GB_FULL) ethtool_link_ksettings_add_link_mode(cmd, supported, 5000baseT_Full); if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL) ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full); if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) { if (ixgbe_isbackplane(hw->phy.media_type)) { ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseKX_Full); ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseKX_Full); } else { ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); } } if (supported_link & IXGBE_LINK_SPEED_100_FULL) { ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); } if (supported_link & IXGBE_LINK_SPEED_10_FULL) { ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); } /* set the advertised speeds */ if (hw->phy.autoneg_advertised) { ethtool_link_ksettings_zero_link_mode(cmd, advertising); if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) ixgbe_set_advertising_10gtypes(hw, cmd); if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { if (ethtool_link_ksettings_test_link_mode (cmd, supported, 1000baseKX_Full)) ethtool_link_ksettings_add_link_mode (cmd, advertising, 1000baseKX_Full); else ethtool_link_ksettings_add_link_mode (cmd, advertising, 1000baseT_Full); } if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) ethtool_link_ksettings_add_link_mode(cmd, advertising, 5000baseT_Full); if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); } else { if (hw->phy.multispeed_fiber && !autoneg) { if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) ethtool_link_ksettings_add_link_mode (cmd, advertising, 10000baseT_Full); } } if (autoneg) { ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); cmd->base.autoneg = AUTONEG_ENABLE; } else cmd->base.autoneg = AUTONEG_DISABLE; /* Determine the remaining settings based on the PHY type. 
*/ switch (adapter->hw.phy.type) { case ixgbe_phy_tn: case ixgbe_phy_aq: case ixgbe_phy_x550em_ext_t: case ixgbe_phy_fw: case ixgbe_phy_cu_unknown: ethtool_link_ksettings_add_link_mode(cmd, supported, TP); ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); cmd->base.port = PORT_TP; break; case ixgbe_phy_qt: ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_FIBRE; break; case ixgbe_phy_nl: case ixgbe_phy_sfp_passive_tyco: case ixgbe_phy_sfp_passive_unknown: case ixgbe_phy_sfp_ftl: case ixgbe_phy_sfp_avago: case ixgbe_phy_sfp_intel: case ixgbe_phy_sfp_unknown: case ixgbe_phy_qsfp_passive_unknown: case ixgbe_phy_qsfp_active_unknown: case ixgbe_phy_qsfp_intel: case ixgbe_phy_qsfp_unknown: /* SFP+ devices, further checking needed */ switch (adapter->hw.phy.sfp_type) { case ixgbe_sfp_type_da_cu: case ixgbe_sfp_type_da_cu_core0: case ixgbe_sfp_type_da_cu_core1: ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_DA; break; case ixgbe_sfp_type_sr: case ixgbe_sfp_type_lr: case ixgbe_sfp_type_srlr_core0: case ixgbe_sfp_type_srlr_core1: case ixgbe_sfp_type_1g_sx_core0: case ixgbe_sfp_type_1g_sx_core1: case ixgbe_sfp_type_1g_lx_core0: case ixgbe_sfp_type_1g_lx_core1: ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_FIBRE; break; case ixgbe_sfp_type_not_present: ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_NONE; break; case ixgbe_sfp_type_1g_cu_core0: case ixgbe_sfp_type_1g_cu_core1: ethtool_link_ksettings_add_link_mode(cmd, supported, TP); ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); cmd->base.port = PORT_TP; break; case ixgbe_sfp_type_unknown: default: ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_OTHER; break; } break; case ixgbe_phy_xaui: ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_NONE; break; case ixgbe_phy_unknown: case ixgbe_phy_generic: case ixgbe_phy_sfp_unsupported: default: ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_OTHER; break; } /* Indicate pause support */ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); switch (hw->fc.requested_mode) { case ixgbe_fc_full: ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); break; case ixgbe_fc_rx_pause: ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); ethtool_link_ksettings_add_link_mode(cmd, advertising, Asym_Pause); break; case ixgbe_fc_tx_pause: ethtool_link_ksettings_add_link_mode(cmd, advertising, Asym_Pause); break; default: ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause); ethtool_link_ksettings_del_link_mode(cmd, advertising, Asym_Pause); } if (netif_carrier_ok(netdev)) { switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: cmd->base.speed = SPEED_10000; break; case IXGBE_LINK_SPEED_5GB_FULL: cmd->base.speed = SPEED_5000; break; case IXGBE_LINK_SPEED_2_5GB_FULL: cmd->base.speed = SPEED_2500; break; case IXGBE_LINK_SPEED_1GB_FULL: cmd->base.speed = SPEED_1000; break; case 
IXGBE_LINK_SPEED_100_FULL: cmd->base.speed = SPEED_100; break; case IXGBE_LINK_SPEED_10_FULL: cmd->base.speed = SPEED_10; break; default: break; } cmd->base.duplex = DUPLEX_FULL; } else { cmd->base.speed = SPEED_UNKNOWN; cmd->base.duplex = DUPLEX_UNKNOWN; } return 0; } static int ixgbe_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 advertised, old; s32 err = 0; if ((hw->phy.media_type == ixgbe_media_type_copper) || (hw->phy.multispeed_fiber)) { /* * this function does not support duplex forcing, but can * limit the advertising of the adapter to the specified speed */ if (!linkmode_subset(cmd->link_modes.advertising, cmd->link_modes.supported)) return -EINVAL; /* only allow one speed at a time if no autoneg */ if (!cmd->base.autoneg && hw->phy.multispeed_fiber) { if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseT_Full) && ethtool_link_ksettings_test_link_mode(cmd, advertising, 1000baseT_Full)) return -EINVAL; } old = hw->phy.autoneg_advertised; advertised = 0; if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseT_Full)) advertised |= IXGBE_LINK_SPEED_10GB_FULL; if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 5000baseT_Full)) advertised |= IXGBE_LINK_SPEED_5GB_FULL; if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full)) advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 1000baseT_Full)) advertised |= IXGBE_LINK_SPEED_1GB_FULL; if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 100baseT_Full)) advertised |= IXGBE_LINK_SPEED_100_FULL; if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10baseT_Full)) advertised |= IXGBE_LINK_SPEED_10_FULL; if (old == advertised) return err; /* this sets the link speed and restarts auto-neg */ while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) usleep_range(1000, 2000); hw->mac.autotry_restart = true; err = hw->mac.ops.setup_link(hw, advertised, true); if (err) { e_info(probe, "setup link failed with code %d\n", err); hw->mac.ops.setup_link(hw, old, true); } clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); } else { /* in this case we currently only support 10Gb/FULL */ u32 speed = cmd->base.speed; if ((cmd->base.autoneg == AUTONEG_ENABLE) || (!ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseT_Full)) || (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL)) return -EINVAL; } return err; } static void ixgbe_get_pause_stats(struct net_device *netdev, struct ethtool_pause_stats *stats) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw_stats *hwstats = &adapter->stats; stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc; stats->rx_pause_frames = hwstats->lxonrxc + hwstats->lxoffrxc; } static void ixgbe_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; if (ixgbe_device_supports_autoneg_fc(hw) && !hw->fc.disable_fc_autoneg) pause->autoneg = 1; else pause->autoneg = 0; if (hw->fc.current_mode == ixgbe_fc_rx_pause) { pause->rx_pause = 1; } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { pause->tx_pause = 1; } else if (hw->fc.current_mode == ixgbe_fc_full) { pause->rx_pause = 1; pause->tx_pause = 1; } } static int ixgbe_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct ixgbe_adapter 
*adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    !ixgbe_device_supports_autoneg_fc(hw))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if the settings changed then update the hardware and use the new
	 * autoneg configuration
	 */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}

static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 1145
	return IXGBE_REGS_LEN * sizeof(u32);
}

#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_

static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw,
IXGBE_PFCTOP); for (i = 0; i < 4; i++) regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i)); for (i = 0; i < 8; i++) { switch (hw->mac.type) { case ixgbe_mac_82598EB: regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); break; default: break; } } regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); /* Receive DMA */ for (i = 0; i < 64; i++) regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); for (i = 0; i < 64; i++) regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); for (i = 0; i < 64; i++) regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); for (i = 0; i < 64; i++) regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); for (i = 0; i < 64; i++) regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); for (i = 0; i < 64; i++) regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); for (i = 0; i < 16; i++) regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); for (i = 0; i < 16; i++) regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); for (i = 0; i < 8; i++) regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); /* Receive */ regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); for (i = 0; i < 16; i++) regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); for (i = 0; i < 16; i++) regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); for (i = 0; i < 8; i++) regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); for (i = 0; i < 8; i++) regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); /* Transmit */ for (i = 0; i < 32; i++) regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); for (i = 0; i < 32; i++) regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); for (i = 0; i < 32; i++) regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); for (i = 0; i < 32; i++) regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); for (i = 0; i < 32; i++) regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); for (i = 0; i < 32; i++) regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); for (i = 0; i < 32; i++) regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); for (i = 0; i < 32; i++) regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); for (i = 0; i < 16; i++) regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); for (i = 0; i < 8; i++) regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); /* Wake Up */ regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); regs_buff[823] = IXGBE_READ_REG(hw, 
IXGBE_IPAV); regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); /* DCB */ regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */ regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */ switch (hw->mac.type) { case ixgbe_mac_82598EB: regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); for (i = 0; i < 8; i++) regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); for (i = 0; i < 8; i++) regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); for (i = 0; i < 8; i++) regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); for (i = 0; i < 8; i++) regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS); regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS); for (i = 0; i < 8; i++) regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i)); for (i = 0; i < 8; i++) regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i)); for (i = 0; i < 8; i++) regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i)); for (i = 0; i < 8; i++) regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i)); break; default: break; } for (i = 0; i < 8; i++) regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */ for (i = 0; i < 8; i++) regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */ /* Statistics */ regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc); regs_buff[883] = IXGBE_GET_STAT(adapter, errbc); regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc); for (i = 0; i < 8; i++) regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]); regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc); regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc); regs_buff[895] = IXGBE_GET_STAT(adapter, rlec); regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc); regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc); regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc); regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc); for (i = 0; i < 8; i++) regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]); for (i = 0; i < 8; i++) regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]); for (i = 0; i < 8; i++) regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]); for (i = 0; i < 8; i++) regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]); regs_buff[932] = IXGBE_GET_STAT(adapter, prc64); regs_buff[933] = IXGBE_GET_STAT(adapter, prc127); regs_buff[934] = IXGBE_GET_STAT(adapter, prc255); regs_buff[935] = IXGBE_GET_STAT(adapter, prc511); regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023); regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522); regs_buff[938] = IXGBE_GET_STAT(adapter, gprc); regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc); regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32); regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc); regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32); for (i = 0; i < 8; i++) regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); regs_buff[955] = IXGBE_GET_STAT(adapter, 
rfc); regs_buff[956] = IXGBE_GET_STAT(adapter, roc); regs_buff[957] = IXGBE_GET_STAT(adapter, rjc); regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor); regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32); regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127); regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255); regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511); regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023); regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522); regs_buff[971] = IXGBE_GET_STAT(adapter, mptc); regs_buff[972] = IXGBE_GET_STAT(adapter, bptc); regs_buff[973] = IXGBE_GET_STAT(adapter, xec); for (i = 0; i < 16; i++) regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]); for (i = 0; i < 16; i++) regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]); for (i = 0; i < 16; i++) regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]); for (i = 0; i < 16; i++) regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); /* MAC */ regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0); regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); /* Diagnostic */ regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); for (i = 0; i < 8; i++) regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); for (i = 0; i < 4; i++) regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); for (i = 0; i < 8; i++) regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); for (i = 0; i < 4; i++) regs_buff[1096 + i] = IXGBE_READ_REG(hw, 
IXGBE_TIC_DW(i)); regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); for (i = 0; i < 4; i++) regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i)); regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); for (i = 0; i < 4; i++) regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i)); for (i = 0; i < 8; i++) regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); /* 82599 X540 specific registers */ regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN); /* 82599 X540 specific DCB registers */ regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC); for (i = 0; i < 4; i++) regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i)); regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM); /* same as RTTQCNRM */ regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD); /* same as RTTQCNRR */ /* X540 specific DCB registers */ regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR); regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG); /* Security config registers */ regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF); regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); } static int ixgbe_get_eeprom_len(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); return adapter->hw.eeprom.word_size * 2; } static int ixgbe_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u16 *eeprom_buff; int first_word, last_word, eeprom_len; int ret_val = 0; u16 i; if (eeprom->len == 0) return -EINVAL; eeprom->magic = hw->vendor_id | (hw->device_id << 16); first_word = eeprom->offset >> 1; last_word = (eeprom->offset + eeprom->len - 1) >> 1; eeprom_len = last_word - first_word + 1; eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL); if (!eeprom_buff) return -ENOMEM; ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, eeprom_buff); /* Device's eeprom is always little-endian, word addressable */ for (i = 0; i < eeprom_len; i++) le16_to_cpus(&eeprom_buff[i]); memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); kfree(eeprom_buff); return ret_val; } static int ixgbe_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u16 *eeprom_buff; void *ptr; int max_len, first_word, last_word, ret_val = 0; u16 i; if (eeprom->len == 0) return -EINVAL; if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) return -EINVAL; max_len = hw->eeprom.word_size * 2; first_word = eeprom->offset >> 1; last_word = (eeprom->offset + eeprom->len - 1) >> 1; eeprom_buff = kmalloc(max_len, GFP_KERNEL); if (!eeprom_buff) return -ENOMEM; ptr 
= eeprom_buff; if (eeprom->offset & 1) { /* * need read/modify/write of first changed EEPROM word * only the second byte of the word is being modified */ ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]); if (ret_val) goto err; ptr++; } if ((eeprom->offset + eeprom->len) & 1) { /* * need read/modify/write of last changed EEPROM word * only the first byte of the word is being modified */ ret_val = hw->eeprom.ops.read(hw, last_word, &eeprom_buff[last_word - first_word]); if (ret_val) goto err; } /* Device's eeprom is always little-endian, word addressable */ for (i = 0; i < last_word - first_word + 1; i++) le16_to_cpus(&eeprom_buff[i]); memcpy(ptr, bytes, eeprom->len); for (i = 0; i < last_word - first_word + 1; i++) cpu_to_le16s(&eeprom_buff[i]); ret_val = hw->eeprom.ops.write_buffer(hw, first_word, last_word - first_word + 1, eeprom_buff); /* Update the checksum */ if (ret_val == 0) hw->eeprom.ops.update_checksum(hw); err: kfree(eeprom_buff); return ret_val; } static void ixgbe_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct ixgbe_adapter *adapter = netdev_priv(netdev); strscpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); strscpy(drvinfo->fw_version, adapter->eeprom_id, sizeof(drvinfo->fw_version)); strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN; } static u32 ixgbe_get_max_rxd(struct ixgbe_adapter *adapter) { switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: return IXGBE_MAX_RXD_82598; case ixgbe_mac_82599EB: return IXGBE_MAX_RXD_82599; case ixgbe_mac_X540: return IXGBE_MAX_RXD_X540; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: return IXGBE_MAX_RXD_X550; default: return IXGBE_MAX_RXD_82598; } } static u32 ixgbe_get_max_txd(struct ixgbe_adapter *adapter) { switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: return IXGBE_MAX_TXD_82598; case ixgbe_mac_82599EB: return IXGBE_MAX_TXD_82599; case ixgbe_mac_X540: return IXGBE_MAX_TXD_X540; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: return IXGBE_MAX_TXD_X550; default: return IXGBE_MAX_TXD_82598; } } static void ixgbe_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; ring->rx_max_pending = ixgbe_get_max_rxd(adapter); ring->tx_max_pending = ixgbe_get_max_txd(adapter); ring->rx_pending = rx_ring->count; ring->tx_pending = tx_ring->count; } static int ixgbe_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *temp_ring; int i, j, err = 0; u32 new_rx_count, new_tx_count; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; new_tx_count = clamp_t(u32, ring->tx_pending, IXGBE_MIN_TXD, ixgbe_get_max_txd(adapter)); new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); new_rx_count = clamp_t(u32, ring->rx_pending, IXGBE_MIN_RXD, ixgbe_get_max_rxd(adapter)); new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); if ((new_tx_count == adapter->tx_ring_count) && (new_rx_count == adapter->rx_ring_count)) { /* nothing to do */ return 0; } while (test_and_set_bit(__IXGBE_RESETTING, 
&adapter->state)) usleep_range(1000, 2000); if (!netif_running(adapter->netdev)) { for (i = 0; i < adapter->num_tx_queues; i++) adapter->tx_ring[i]->count = new_tx_count; for (i = 0; i < adapter->num_xdp_queues; i++) adapter->xdp_ring[i]->count = new_tx_count; for (i = 0; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->count = new_rx_count; adapter->tx_ring_count = new_tx_count; adapter->xdp_ring_count = new_tx_count; adapter->rx_ring_count = new_rx_count; goto clear_reset; } /* allocate temporary buffer to store rings in */ i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues, adapter->num_rx_queues); temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring))); if (!temp_ring) { err = -ENOMEM; goto clear_reset; } ixgbe_down(adapter); /* * Setup new Tx resources and free the old Tx resources in that order. * We can then assign the new resources to the rings via a memcpy. * The advantage to this approach is that we are guaranteed to still * have resources even in the case of an allocation failure. */ if (new_tx_count != adapter->tx_ring_count) { for (i = 0; i < adapter->num_tx_queues; i++) { memcpy(&temp_ring[i], adapter->tx_ring[i], sizeof(struct ixgbe_ring)); temp_ring[i].count = new_tx_count; err = ixgbe_setup_tx_resources(&temp_ring[i]); if (err) { while (i) { i--; ixgbe_free_tx_resources(&temp_ring[i]); } goto err_setup; } } for (j = 0; j < adapter->num_xdp_queues; j++, i++) { memcpy(&temp_ring[i], adapter->xdp_ring[j], sizeof(struct ixgbe_ring)); temp_ring[i].count = new_tx_count; err = ixgbe_setup_tx_resources(&temp_ring[i]); if (err) { while (i) { i--; ixgbe_free_tx_resources(&temp_ring[i]); } goto err_setup; } } for (i = 0; i < adapter->num_tx_queues; i++) { ixgbe_free_tx_resources(adapter->tx_ring[i]); memcpy(adapter->tx_ring[i], &temp_ring[i], sizeof(struct ixgbe_ring)); } for (j = 0; j < adapter->num_xdp_queues; j++, i++) { ixgbe_free_tx_resources(adapter->xdp_ring[j]); memcpy(adapter->xdp_ring[j], &temp_ring[i], sizeof(struct ixgbe_ring)); } adapter->tx_ring_count = new_tx_count; } /* Repeat the process for the Rx rings if needed */ if (new_rx_count != adapter->rx_ring_count) { for (i = 0; i < adapter->num_rx_queues; i++) { memcpy(&temp_ring[i], adapter->rx_ring[i], sizeof(struct ixgbe_ring)); /* Clear copied XDP RX-queue info */ memset(&temp_ring[i].xdp_rxq, 0, sizeof(temp_ring[i].xdp_rxq)); temp_ring[i].count = new_rx_count; err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]); if (err) { while (i) { i--; ixgbe_free_rx_resources(&temp_ring[i]); } goto err_setup; } } for (i = 0; i < adapter->num_rx_queues; i++) { ixgbe_free_rx_resources(adapter->rx_ring[i]); memcpy(adapter->rx_ring[i], &temp_ring[i], sizeof(struct ixgbe_ring)); } adapter->rx_ring_count = new_rx_count; } err_setup: ixgbe_up(adapter); vfree(temp_ring); clear_reset: clear_bit(__IXGBE_RESETTING, &adapter->state); return err; } static int ixgbe_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { case ETH_SS_TEST: return IXGBE_TEST_LEN; case ETH_SS_STATS: return IXGBE_STATS_LEN; case ETH_SS_PRIV_FLAGS: return IXGBE_PRIV_FLAGS_STR_LEN; default: return -EOPNOTSUPP; } } static void ixgbe_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *net_stats; unsigned int start; struct ixgbe_ring *ring; int i, j; char *p = NULL; ixgbe_update_stats(adapter); net_stats = dev_get_stats(netdev, &temp); for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) 
{ switch (ixgbe_gstrings_stats[i].type) { case NETDEV_STATS: p = (char *) net_stats + ixgbe_gstrings_stats[i].stat_offset; break; case IXGBE_STATS: p = (char *) adapter + ixgbe_gstrings_stats[i].stat_offset; break; default: data[i] = 0; continue; } data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } for (j = 0; j < netdev->num_tx_queues; j++) { ring = adapter->tx_ring[j]; if (!ring) { data[i] = 0; data[i+1] = 0; i += 2; continue; } do { start = u64_stats_fetch_begin(&ring->syncp); data[i] = ring->stats.packets; data[i+1] = ring->stats.bytes; } while (u64_stats_fetch_retry(&ring->syncp, start)); i += 2; } for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { ring = adapter->rx_ring[j]; if (!ring) { data[i] = 0; data[i+1] = 0; i += 2; continue; } do { start = u64_stats_fetch_begin(&ring->syncp); data[i] = ring->stats.packets; data[i+1] = ring->stats.bytes; } while (u64_stats_fetch_retry(&ring->syncp, start)); i += 2; } for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { data[i++] = adapter->stats.pxontxc[j]; data[i++] = adapter->stats.pxofftxc[j]; } for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { data[i++] = adapter->stats.pxonrxc[j]; data[i++] = adapter->stats.pxoffrxc[j]; } } static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { unsigned int i; u8 *p = data; switch (stringset) { case ETH_SS_TEST: for (i = 0; i < IXGBE_TEST_LEN; i++) ethtool_sprintf(&p, ixgbe_gstrings_test[i]); break; case ETH_SS_STATS: for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) ethtool_sprintf(&p, ixgbe_gstrings_stats[i].stat_string); for (i = 0; i < netdev->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); } for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { ethtool_sprintf(&p, "rx_queue_%u_packets", i); ethtool_sprintf(&p, "rx_queue_%u_bytes", i); } for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { ethtool_sprintf(&p, "tx_pb_%u_pxon", i); ethtool_sprintf(&p, "tx_pb_%u_pxoff", i); } for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { ethtool_sprintf(&p, "rx_pb_%u_pxon", i); ethtool_sprintf(&p, "rx_pb_%u_pxoff", i); } /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ break; case ETH_SS_PRIV_FLAGS: memcpy(data, ixgbe_priv_flags_strings, IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); } } static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) { struct ixgbe_hw *hw = &adapter->hw; bool link_up; u32 link_speed = 0; if (ixgbe_removed(hw->hw_addr)) { *data = 1; return 1; } *data = 0; hw->mac.ops.check_link(hw, &link_speed, &link_up, true); if (link_up) return *data; else *data = 1; return *data; } /* ethtool register test data */ struct ixgbe_reg_test { u16 reg; u8 array_len; u8 test_type; u32 mask; u32 write; }; /* In the hardware, registers are laid out either singly, in arrays * spaced 0x40 bytes apart, or in contiguous tables. We assume * most tests take place on arrays or single registers (handled * as a single-element array) and special-case the tables. * Table tests are always pattern tests. * * We also make provision for some required setup steps by specifying * registers to be written without any read-back testing. 
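For example, the 4-entry PATTERN_TEST on IXGBE_RDBAL(0) below walks registers at reg + 0x00, 0x40, 0x80 and 0xC0, while TABLE32 and TABLE64 entries step by 4 and 8 bytes respectively.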
*/ #define PATTERN_TEST 1 #define SET_READ_TEST 2 #define WRITE_NO_TEST 3 #define TABLE32_TEST 4 #define TABLE64_TEST_LO 5 #define TABLE64_TEST_HI 6 /* default 82599 register test */ static const struct ixgbe_reg_test reg_test_82599[] = { { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { .reg = 0 } }; /* default 82598 register test */ static const struct ixgbe_reg_test reg_test_82598[] = { { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, /* Enable all four RX queues before testing. */ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, /* RDH is read-only for 82598, only test RDT. 
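The RDT pattern below is limited to the 16-bit mask 0x0000FFFF.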
*/ { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF }, { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 }, { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { .reg = 0 } }; static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, u32 mask, u32 write) { u32 pat, val, before; static const u32 test_pattern[] = { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; if (ixgbe_removed(adapter->hw.hw_addr)) { *data = 1; return true; } for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { before = ixgbe_read_reg(&adapter->hw, reg); ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write); val = ixgbe_read_reg(&adapter->hw, reg); if (val != (test_pattern[pat] & write & mask)) { e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", reg, val, (test_pattern[pat] & write & mask)); *data = reg; ixgbe_write_reg(&adapter->hw, reg, before); return true; } ixgbe_write_reg(&adapter->hw, reg, before); } return false; } static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, u32 mask, u32 write) { u32 val, before; if (ixgbe_removed(adapter->hw.hw_addr)) { *data = 1; return true; } before = ixgbe_read_reg(&adapter->hw, reg); ixgbe_write_reg(&adapter->hw, reg, write & mask); val = ixgbe_read_reg(&adapter->hw, reg); if ((write & mask) != (val & mask)) { e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg, (val & mask), (write & mask)); *data = reg; ixgbe_write_reg(&adapter->hw, reg, before); return true; } ixgbe_write_reg(&adapter->hw, reg, before); return false; } static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) { const struct ixgbe_reg_test *test; u32 value, before, after; u32 i, toggle; if (ixgbe_removed(adapter->hw.hw_addr)) { e_err(drv, "Adapter removed - register test blocked\n"); *data = 1; return 1; } switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: toggle = 0x7FFFF3FF; test = reg_test_82598; break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: toggle = 0x7FFFF30F; test = reg_test_82599; break; default: *data = 1; return 1; } /* * Because the status register is such a special case, * we handle it separately from the rest of the register * tests. Some bits are read-only, some toggle, and some * are writeable on newer MACs. */ before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS); value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle); ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle); after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle; if (value != after) { e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n", after, value); *data = 1; return 1; } /* restore previous status */ ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before); /* * Perform the remainder of the register test, looping through * the test table until we either fail or reach the null entry. 
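Each entry's test_type selects one of the helpers below; the pattern and set-and-check tests restore the original register value whether they pass or fail, and WRITE_NO_TEST entries are written without any read-back.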
*/ while (test->reg) { for (i = 0; i < test->array_len; i++) { bool b = false; switch (test->test_type) { case PATTERN_TEST: b = reg_pattern_test(adapter, data, test->reg + (i * 0x40), test->mask, test->write); break; case SET_READ_TEST: b = reg_set_and_check(adapter, data, test->reg + (i * 0x40), test->mask, test->write); break; case WRITE_NO_TEST: ixgbe_write_reg(&adapter->hw, test->reg + (i * 0x40), test->write); break; case TABLE32_TEST: b = reg_pattern_test(adapter, data, test->reg + (i * 4), test->mask, test->write); break; case TABLE64_TEST_LO: b = reg_pattern_test(adapter, data, test->reg + (i * 8), test->mask, test->write); break; case TABLE64_TEST_HI: b = reg_pattern_test(adapter, data, (test->reg + 4) + (i * 8), test->mask, test->write); break; } if (b) return 1; } test++; } *data = 0; return 0; } static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) { struct ixgbe_hw *hw = &adapter->hw; if (hw->eeprom.ops.validate_checksum(hw, NULL)) *data = 1; else *data = 0; return *data; } static irqreturn_t ixgbe_test_intr(int irq, void *data) { struct net_device *netdev = (struct net_device *) data; struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); return IRQ_HANDLED; } static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) { struct net_device *netdev = adapter->netdev; u32 mask, i = 0, shared_int = true; u32 irq = adapter->pdev->irq; *data = 0; /* Hook up test interrupt handler just for this test */ if (adapter->msix_entries) { /* NOTE: we don't test MSI-X interrupts here, yet */ return 0; } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { shared_int = false; if (request_irq(irq, ixgbe_test_intr, 0, netdev->name, netdev)) { *data = 1; return -1; } } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, netdev->name, netdev)) { shared_int = false; } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED, netdev->name, netdev)) { *data = 1; return -1; } e_info(hw, "testing %s interrupt\n", shared_int ? "shared" : "unshared"); /* Disable all the interrupts */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); IXGBE_WRITE_FLUSH(&adapter->hw); usleep_range(10000, 20000); /* Test each interrupt */ for (; i < 10; i++) { /* Interrupt to test */ mask = BIT(i); if (!shared_int) { /* * Disable the interrupts to be reported in * the cause register and then force the same * interrupt and see if one gets posted. If * an interrupt was posted to the bus, the * test failed. */ adapter->test_icr = 0; IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~mask & 0x00007FFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, ~mask & 0x00007FFF); IXGBE_WRITE_FLUSH(&adapter->hw); usleep_range(10000, 20000); if (adapter->test_icr & mask) { *data = 3; break; } } /* * Enable the interrupt to be reported in the cause * register and then force the same interrupt and see * if one gets posted. If an interrupt was not posted * to the bus, the test failed. */ adapter->test_icr = 0; IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); IXGBE_WRITE_FLUSH(&adapter->hw); usleep_range(10000, 20000); if (!(adapter->test_icr & mask)) { *data = 4; break; } if (!shared_int) { /* * Disable the other interrupts to be reported in * the cause register and then force the other * interrupts and see if any get posted. If * an interrupt was posted to the bus, the * test failed. 
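The loop walks cause bits 0-9 one at a time; e.g. for i = 3, mask = BIT(3) = 0x8 and the disable write becomes ~0x8 & 0x00007FFF = 0x00007FF7.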
*/ adapter->test_icr = 0; IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~mask & 0x00007FFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, ~mask & 0x00007FFF); IXGBE_WRITE_FLUSH(&adapter->hw); usleep_range(10000, 20000); if (adapter->test_icr) { *data = 5; break; } } } /* Disable all the interrupts */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); IXGBE_WRITE_FLUSH(&adapter->hw); usleep_range(10000, 20000); /* Unhook test interrupt handler */ free_irq(irq, netdev); return *data; } static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) { /* Shut down the DMA engines now so they can be reinitialized later, * since the test rings and normally used rings should overlap on * queue 0 we can just use the standard disable Rx/Tx calls and they * will take care of disabling the test rings for us. */ /* first Rx */ ixgbe_disable_rx(adapter); /* now Tx */ ixgbe_disable_tx(adapter); ixgbe_reset(adapter); ixgbe_free_tx_resources(&adapter->test_tx_ring); ixgbe_free_rx_resources(&adapter->test_rx_ring); } static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) { struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; struct ixgbe_hw *hw = &adapter->hw; u32 rctl, reg_data; int ret_val; int err; /* Setup Tx descriptor ring and Tx buffers */ tx_ring->count = IXGBE_DEFAULT_TXD; tx_ring->queue_index = 0; tx_ring->dev = &adapter->pdev->dev; tx_ring->netdev = adapter->netdev; tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; err = ixgbe_setup_tx_resources(tx_ring); if (err) return 1; switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); reg_data |= IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); break; default: break; } ixgbe_configure_tx_ring(adapter, tx_ring); /* Setup Rx Descriptor ring and Rx buffers */ rx_ring->count = IXGBE_DEFAULT_RXD; rx_ring->queue_index = 0; rx_ring->dev = &adapter->pdev->dev; rx_ring->netdev = adapter->netdev; rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; err = ixgbe_setup_rx_resources(adapter, rx_ring); if (err) { ret_val = 4; goto err_nomem; } hw->mac.ops.disable_rx(hw); ixgbe_configure_rx_ring(adapter, rx_ring); rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); rctl |= IXGBE_RXCTRL_DMBYPS; IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); hw->mac.ops.enable_rx(hw); return 0; err_nomem: ixgbe_free_desc_rings(adapter); return ret_val; } static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 reg_data; /* Setup MAC loopback */ reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0); reg_data |= IXGBE_HLREG0_LPBK; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data); reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL); reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); /* X540 and X550 needs to set the MACC.FLU bit to force link up */ switch (adapter->hw.mac.type) { case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); reg_data |= IXGBE_MACC_FLU; IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); break; default: if (hw->mac.orig_autoc) { reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data); } else { return 10; } } IXGBE_WRITE_FLUSH(hw); usleep_range(10000, 20000); /* Disable Atlas Tx lanes; re-enabled in reset path */ if (hw->mac.type == 
ixgbe_mac_82598EB) { u8 atlas; hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas); atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas); hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas); atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas); hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas); atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas); hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas); atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas); } return 0; } static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) { u32 reg_data; reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); reg_data &= ~IXGBE_HLREG0_LPBK; IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); } static void ixgbe_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) { memset(skb->data, 0xFF, frame_size); frame_size >>= 1; memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); skb->data[frame_size + 10] = 0xBE; skb->data[frame_size + 12] = 0xAF; } static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer, unsigned int frame_size) { unsigned char *data; frame_size >>= 1; data = page_address(rx_buffer->page) + rx_buffer->page_offset; return data[3] == 0xFF && data[frame_size + 10] == 0xBE && data[frame_size + 12] == 0xAF; } static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, struct ixgbe_ring *tx_ring, unsigned int size) { union ixgbe_adv_rx_desc *rx_desc; u16 rx_ntc, tx_ntc, count = 0; /* initialize next to clean and descriptor values */ rx_ntc = rx_ring->next_to_clean; tx_ntc = tx_ring->next_to_clean; rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); while (tx_ntc != tx_ring->next_to_use) { union ixgbe_adv_tx_desc *tx_desc; struct ixgbe_tx_buffer *tx_buffer; tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc); /* if DD is not set transmit has not completed */ if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) return count; /* unmap buffer on Tx side */ tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; /* Free all the Tx ring sk_buffs */ dev_kfree_skb_any(tx_buffer->skb); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); dma_unmap_len_set(tx_buffer, len, 0); /* increment Tx next to clean counter */ tx_ntc++; if (tx_ntc == tx_ring->count) tx_ntc = 0; } while (rx_desc->wb.upper.length) { struct ixgbe_rx_buffer *rx_buffer; /* check Rx buffer */ rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; /* sync Rx buffer for CPU read */ dma_sync_single_for_cpu(rx_ring->dev, rx_buffer->dma, ixgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE); /* verify contents of skb */ if (ixgbe_check_lbtest_frame(rx_buffer, size)) count++; else break; /* sync Rx buffer for device write */ dma_sync_single_for_device(rx_ring->dev, rx_buffer->dma, ixgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE); /* increment Rx next to clean counter */ rx_ntc++; if (rx_ntc == rx_ring->count) rx_ntc = 0; /* fetch next descriptor */ rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); } netdev_tx_reset_queue(txring_txq(tx_ring)); /* re-map buffers to ring, store next to clean values */ ixgbe_alloc_rx_buffers(rx_ring, count); rx_ring->next_to_clean = rx_ntc; tx_ring->next_to_clean = tx_ntc; return count; } static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) { struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; struct ixgbe_ring 
*rx_ring = &adapter->test_rx_ring; int i, j, lc, good_cnt, ret_val = 0; unsigned int size = 1024; netdev_tx_t tx_ret_val; struct sk_buff *skb; u32 flags_orig = adapter->flags; /* DCB can modify the frames on Tx */ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; /* allocate test skb */ skb = alloc_skb(size, GFP_KERNEL); if (!skb) return 11; /* place data into test skb */ ixgbe_create_lbtest_frame(skb, size); skb_put(skb, size); /* * Calculate the loop count based on the largest descriptor ring * The idea is to wrap the largest ring a number of times using 64 * send/receive pairs during each loop */ if (rx_ring->count <= tx_ring->count) lc = ((tx_ring->count / 64) * 2) + 1; else lc = ((rx_ring->count / 64) * 2) + 1; for (j = 0; j <= lc; j++) { /* reset count of good packets */ good_cnt = 0; /* place 64 packets on the transmit queue*/ for (i = 0; i < 64; i++) { skb_get(skb); tx_ret_val = ixgbe_xmit_frame_ring(skb, adapter, tx_ring); if (tx_ret_val == NETDEV_TX_OK) good_cnt++; } if (good_cnt != 64) { ret_val = 12; break; } /* allow 200 milliseconds for packets to go from Tx to Rx */ msleep(200); good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size); if (good_cnt != 64) { ret_val = 13; break; } } /* free the original skb */ kfree_skb(skb); adapter->flags = flags_orig; return ret_val; } static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) { *data = ixgbe_setup_desc_rings(adapter); if (*data) goto out; *data = ixgbe_setup_loopback_test(adapter); if (*data) goto err_loopback; *data = ixgbe_run_loopback_test(adapter); ixgbe_loopback_cleanup(adapter); err_loopback: ixgbe_free_desc_rings(adapter); out: return *data; } static void ixgbe_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); bool if_running = netif_running(netdev); if (ixgbe_removed(adapter->hw.hw_addr)) { e_err(hw, "Adapter removed - test blocked\n"); data[0] = 1; data[1] = 1; data[2] = 1; data[3] = 1; data[4] = 1; eth_test->flags |= ETH_TEST_FL_FAILED; return; } set_bit(__IXGBE_TESTING, &adapter->state); if (eth_test->flags == ETH_TEST_FL_OFFLINE) { struct ixgbe_hw *hw = &adapter->hw; if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { int i; for (i = 0; i < adapter->num_vfs; i++) { if (adapter->vfinfo[i].clear_to_send) { netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n"); data[0] = 1; data[1] = 1; data[2] = 1; data[3] = 1; data[4] = 1; eth_test->flags |= ETH_TEST_FL_FAILED; clear_bit(__IXGBE_TESTING, &adapter->state); return; } } } /* Offline tests */ e_info(hw, "offline testing starting\n"); /* Link test performed before hardware reset so autoneg doesn't * interfere with test result */ if (ixgbe_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; if (if_running) /* indicate we're in test mode */ ixgbe_close(netdev); else ixgbe_reset(adapter); e_info(hw, "register testing starting\n"); if (ixgbe_reg_test(adapter, &data[0])) eth_test->flags |= ETH_TEST_FL_FAILED; ixgbe_reset(adapter); e_info(hw, "eeprom testing starting\n"); if (ixgbe_eeprom_test(adapter, &data[1])) eth_test->flags |= ETH_TEST_FL_FAILED; ixgbe_reset(adapter); e_info(hw, "interrupt testing starting\n"); if (ixgbe_intr_test(adapter, &data[2])) eth_test->flags |= ETH_TEST_FL_FAILED; /* If SRIOV or VMDq is enabled then skip MAC * loopback diagnostic. 
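When either flag is set, the code below logs a message, reports data[3] = 0 so the loopback stage shows as passed, and jumps past the loopback test.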
*/ if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) { e_info(hw, "Skip MAC loopback diagnostic in VT mode\n"); data[3] = 0; goto skip_loopback; } ixgbe_reset(adapter); e_info(hw, "loopback testing starting\n"); if (ixgbe_loopback_test(adapter, &data[3])) eth_test->flags |= ETH_TEST_FL_FAILED; skip_loopback: ixgbe_reset(adapter); /* clear testing bit and return adapter to previous state */ clear_bit(__IXGBE_TESTING, &adapter->state); if (if_running) ixgbe_open(netdev); else if (hw->mac.ops.disable_tx_laser) hw->mac.ops.disable_tx_laser(hw); } else { e_info(hw, "online testing starting\n"); /* Online tests */ if (ixgbe_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; /* Offline tests aren't run; pass by default */ data[0] = 0; data[1] = 0; data[2] = 0; data[3] = 0; clear_bit(__IXGBE_TESTING, &adapter->state); } } static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, struct ethtool_wolinfo *wol) { struct ixgbe_hw *hw = &adapter->hw; int retval = 0; /* WOL not supported for all devices */ if (!ixgbe_wol_supported(adapter, hw->device_id, hw->subsystem_device_id)) { retval = 1; wol->supported = 0; } return retval; } static void ixgbe_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct ixgbe_adapter *adapter = netdev_priv(netdev); wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; wol->wolopts = 0; if (ixgbe_wol_exclusion(adapter, wol) || !device_can_wakeup(&adapter->pdev->dev)) return; if (adapter->wol & IXGBE_WUFC_EX) wol->wolopts |= WAKE_UCAST; if (adapter->wol & IXGBE_WUFC_MC) wol->wolopts |= WAKE_MCAST; if (adapter->wol & IXGBE_WUFC_BC) wol->wolopts |= WAKE_BCAST; if (adapter->wol & IXGBE_WUFC_MAG) wol->wolopts |= WAKE_MAGIC; } static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) return -EOPNOTSUPP; if (ixgbe_wol_exclusion(adapter, wol)) return wol->wolopts ? 
-EOPNOTSUPP : 0; adapter->wol = 0; if (wol->wolopts & WAKE_UCAST) adapter->wol |= IXGBE_WUFC_EX; if (wol->wolopts & WAKE_MCAST) adapter->wol |= IXGBE_WUFC_MC; if (wol->wolopts & WAKE_BCAST) adapter->wol |= IXGBE_WUFC_BC; if (wol->wolopts & WAKE_MAGIC) adapter->wol |= IXGBE_WUFC_MAG; device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); return 0; } static int ixgbe_nway_reset(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) ixgbe_reinit_locked(adapter); return 0; } static int ixgbe_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; if (!hw->mac.ops.led_on || !hw->mac.ops.led_off) return -EOPNOTSUPP; switch (state) { case ETHTOOL_ID_ACTIVE: adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); return 2; case ETHTOOL_ID_ON: hw->mac.ops.led_on(hw, hw->mac.led_link_act); break; case ETHTOOL_ID_OFF: hw->mac.ops.led_off(hw, hw->mac.led_link_act); break; case ETHTOOL_ID_INACTIVE: /* Restore LED settings */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); break; } return 0; } static int ixgbe_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct ixgbe_adapter *adapter = netdev_priv(netdev); /* only valid if in constant ITR mode */ if (adapter->rx_itr_setting <= 1) ec->rx_coalesce_usecs = adapter->rx_itr_setting; else ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; /* if in mixed tx/rx queues per vector mode, report only rx settings */ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) return 0; /* only valid if in constant ITR mode */ if (adapter->tx_itr_setting <= 1) ec->tx_coalesce_usecs = adapter->tx_itr_setting; else ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; return 0; } /* * this function must be called before setting the new value of * rx_itr_setting */ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; /* nothing to do if LRO or RSC are not enabled */ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) || !(netdev->features & NETIF_F_LRO)) return false; /* check the feature flag value and enable RSC if necessary */ if (adapter->rx_itr_setting == 1 || adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; e_info(probe, "rx-usecs value high enough to re-enable RSC\n"); return true; } /* if interrupt rate is too high then disable RSC */ } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; e_info(probe, "rx-usecs set too low, disabling RSC\n"); return true; } return false; } static int ixgbe_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_q_vector *q_vector; int i; u16 tx_itr_param, rx_itr_param, tx_itr_prev; bool need_reset = false; if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { /* reject Tx specific changes in case of mixed RxTx vectors */ if (ec->tx_coalesce_usecs) return -EINVAL; tx_itr_prev = adapter->rx_itr_setting; } else { tx_itr_prev = adapter->tx_itr_setting; } if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) return -EINVAL; if 
(ec->rx_coalesce_usecs > 1) adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; else adapter->rx_itr_setting = ec->rx_coalesce_usecs; if (adapter->rx_itr_setting == 1) rx_itr_param = IXGBE_20K_ITR; else rx_itr_param = adapter->rx_itr_setting; if (ec->tx_coalesce_usecs > 1) adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; else adapter->tx_itr_setting = ec->tx_coalesce_usecs; if (adapter->tx_itr_setting == 1) tx_itr_param = IXGBE_12K_ITR; else tx_itr_param = adapter->tx_itr_setting; /* mixed Rx/Tx */ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) adapter->tx_itr_setting = adapter->rx_itr_setting; /* detect ITR changes that require update of TXDCTL.WTHRESH */ if ((adapter->tx_itr_setting != 1) && (adapter->tx_itr_setting < IXGBE_100K_ITR)) { if ((tx_itr_prev == 1) || (tx_itr_prev >= IXGBE_100K_ITR)) need_reset = true; } else { if ((tx_itr_prev != 1) && (tx_itr_prev < IXGBE_100K_ITR)) need_reset = true; } /* check the old value and enable RSC if necessary */ need_reset |= ixgbe_update_rsc(adapter); for (i = 0; i < adapter->num_q_vectors; i++) { q_vector = adapter->q_vector[i]; if (q_vector->tx.count && !q_vector->rx.count) /* tx only */ q_vector->itr = tx_itr_param; else /* rx only or mixed */ q_vector->itr = rx_itr_param; ixgbe_write_eitr(q_vector); } /* * do reset here at the end to make sure EITR==0 case is handled * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings * also locks in RSC enable/disable which requires reset */ if (need_reset) ixgbe_do_reset(netdev); return 0; } static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, struct ethtool_rxnfc *cmd) { union ixgbe_atr_input *mask = &adapter->fdir_mask; struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; struct hlist_node *node2; struct ixgbe_fdir_filter *rule = NULL; /* report total rule count */ cmd->data = (1024 << adapter->fdir_pballoc) - 2; hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, fdir_node) { if (fsp->location <= rule->sw_idx) break; } if (!rule || fsp->location != rule->sw_idx) return -EINVAL; /* fill out the flow spec entry */ /* set flow type field */ switch (rule->filter.formatted.flow_type) { case IXGBE_ATR_FLOW_TYPE_TCPV4: fsp->flow_type = TCP_V4_FLOW; break; case IXGBE_ATR_FLOW_TYPE_UDPV4: fsp->flow_type = UDP_V4_FLOW; break; case IXGBE_ATR_FLOW_TYPE_SCTPV4: fsp->flow_type = SCTP_V4_FLOW; break; case IXGBE_ATR_FLOW_TYPE_IPV4: fsp->flow_type = IP_USER_FLOW; fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; fsp->h_u.usr_ip4_spec.proto = 0; fsp->m_u.usr_ip4_spec.proto = 0; break; default: return -EINVAL; } fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id; fsp->m_ext.vlan_tci = mask->formatted.vlan_id; fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); fsp->flow_type |= FLOW_EXT; /* record action */ if (rule->action == IXGBE_FDIR_DROP_QUEUE) fsp->ring_cookie = 
RX_CLS_FLOW_DISC; else fsp->ring_cookie = rule->action; return 0; } static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct hlist_node *node2; struct ixgbe_fdir_filter *rule; int cnt = 0; /* report total rule count */ cmd->data = (1024 << adapter->fdir_pballoc) - 2; hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, fdir_node) { if (cnt == cmd->rule_cnt) return -EMSGSIZE; rule_locs[cnt] = rule->sw_idx; cnt++; } cmd->rule_cnt = cnt; return 0; } static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, struct ethtool_rxnfc *cmd) { cmd->data = 0; /* Report default options for RSS on ixgbe */ switch (cmd->flow_type) { case TCP_V4_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; fallthrough; case UDP_V4_FLOW: if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; fallthrough; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: case ESP_V4_FLOW: case IPV4_FLOW: cmd->data |= RXH_IP_SRC | RXH_IP_DST; break; case TCP_V6_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; fallthrough; case UDP_V6_FLOW: if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; fallthrough; case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: case IPV6_FLOW: cmd->data |= RXH_IP_SRC | RXH_IP_DST; break; default: return -EINVAL; } return 0; } static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter) { if (adapter->hw.mac.type < ixgbe_mac_X550) return 16; else return 64; } static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct ixgbe_adapter *adapter = netdev_priv(dev); int ret = -EOPNOTSUPP; switch (cmd->cmd) { case ETHTOOL_GRXRINGS: cmd->data = min_t(int, adapter->num_rx_queues, ixgbe_rss_indir_tbl_max(adapter)); ret = 0; break; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = adapter->fdir_filter_count; ret = 0; break; case ETHTOOL_GRXCLSRULE: ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); break; case ETHTOOL_GRXCLSRLALL: ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs); break; case ETHTOOL_GRXFH: ret = ixgbe_get_rss_hash_opts(adapter, cmd); break; default: break; } return ret; } int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, struct ixgbe_fdir_filter *input, u16 sw_idx) { struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; struct ixgbe_fdir_filter *rule, *parent; int err = -EINVAL; parent = NULL; rule = NULL; hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, fdir_node) { /* hash found, or no matching entry */ if (rule->sw_idx >= sw_idx) break; parent = rule; } /* if there is an old rule occupying our place remove it */ if (rule && (rule->sw_idx == sw_idx)) { if (!input || (rule->filter.formatted.bkt_hash != input->filter.formatted.bkt_hash)) { err = ixgbe_fdir_erase_perfect_filter_82599(hw, &rule->filter, sw_idx); } hlist_del(&rule->fdir_node); kfree(rule); adapter->fdir_filter_count--; } /* * If no input this was a delete, err should be 0 if a rule was * successfully found and removed from the list else -EINVAL */ if (!input) return err; /* initialize node and set software index */ INIT_HLIST_NODE(&input->fdir_node); /* add filter to the list */ if (parent) hlist_add_behind(&input->fdir_node, &parent->fdir_node); else hlist_add_head(&input->fdir_node, &adapter->fdir_filter_list); /* update counts */ adapter->fdir_filter_count++; return 0; } static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, u8 
*flow_type) { switch (fsp->flow_type & ~FLOW_EXT) { case TCP_V4_FLOW: *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; break; case UDP_V4_FLOW: *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; break; case SCTP_V4_FLOW: *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; break; case IP_USER_FLOW: switch (fsp->h_u.usr_ip4_spec.proto) { case IPPROTO_TCP: *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; break; case IPPROTO_UDP: *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; break; case IPPROTO_SCTP: *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; break; case 0: if (!fsp->m_u.usr_ip4_spec.proto) { *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; break; } fallthrough; default: return 0; } break; default: return 0; } return 1; } static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, struct ethtool_rxnfc *cmd) { struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_fdir_filter *input; union ixgbe_atr_input mask; u8 queue; int err; if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) return -EOPNOTSUPP; /* ring_cookie is a masked into a set of queues and ixgbe pools or * we use the drop index. */ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { queue = IXGBE_FDIR_DROP_QUEUE; } else { u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); if (!vf && (ring >= adapter->num_rx_queues)) return -EINVAL; else if (vf && ((vf > adapter->num_vfs) || ring >= adapter->num_rx_queues_per_pool)) return -EINVAL; /* Map the ring onto the absolute queue index */ if (!vf) queue = adapter->rx_ring[ring]->reg_idx; else queue = ((vf - 1) * adapter->num_rx_queues_per_pool) + ring; } /* Don't allow indexes to exist outside of available space */ if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { e_err(drv, "Location out of range\n"); return -EINVAL; } input = kzalloc(sizeof(*input), GFP_ATOMIC); if (!input) return -ENOMEM; memset(&mask, 0, sizeof(union ixgbe_atr_input)); /* set SW index */ input->sw_idx = fsp->location; /* record flow type */ if (!ixgbe_flowspec_to_flow_type(fsp, &input->filter.formatted.flow_type)) { e_err(drv, "Unrecognized flow type\n"); goto err_out; } mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | IXGBE_ATR_L4TYPE_MASK; if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; /* Copy input into formatted structures */ input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; if (fsp->flow_type & FLOW_EXT) { input->filter.formatted.vm_pool = (unsigned char)ntohl(fsp->h_ext.data[1]); mask.formatted.vm_pool = (unsigned char)ntohl(fsp->m_ext.data[1]); input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci; mask.formatted.vlan_id = fsp->m_ext.vlan_tci; input->filter.formatted.flex_bytes = fsp->h_ext.vlan_etype; mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; } /* determine if we need to drop or route the packet */ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) input->action = IXGBE_FDIR_DROP_QUEUE; else input->action = fsp->ring_cookie; spin_lock(&adapter->fdir_perfect_lock); if (hlist_empty(&adapter->fdir_filter_list)) { /* save 
mask and program input mask into HW */ memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); err = ixgbe_fdir_set_input_mask_82599(hw, &mask); if (err) { e_err(drv, "Error writing mask\n"); goto err_out_w_lock; } } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { e_err(drv, "Only one mask supported per port\n"); goto err_out_w_lock; } /* apply mask and compute/store hash */ ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); /* program filters to filter memory */ err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, input->sw_idx, queue); if (err) goto err_out_w_lock; ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); spin_unlock(&adapter->fdir_perfect_lock); return err; err_out_w_lock: spin_unlock(&adapter->fdir_perfect_lock); err_out: kfree(input); return -EINVAL; } static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, struct ethtool_rxnfc *cmd) { struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; int err; spin_lock(&adapter->fdir_perfect_lock); err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); spin_unlock(&adapter->fdir_perfect_lock); return err; } #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, struct ethtool_rxnfc *nfc) { u32 flags2 = adapter->flags2; /* * RSS does not support anything other than hashing * to queues on src and dst IPs and ports */ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) return -EINVAL; switch (nfc->flow_type) { case TCP_V4_FLOW: case TCP_V6_FLOW: if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST) || !(nfc->data & RXH_L4_B_0_1) || !(nfc->data & RXH_L4_B_2_3)) return -EINVAL; break; case UDP_V4_FLOW: if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST)) return -EINVAL; switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; break; default: return -EINVAL; } break; case UDP_V6_FLOW: if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST)) return -EINVAL; switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; break; default: return -EINVAL; } break; case AH_ESP_V4_FLOW: case AH_V4_FLOW: case ESP_V4_FLOW: case SCTP_V4_FLOW: case AH_ESP_V6_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: case SCTP_V6_FLOW: if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST) || (nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; break; default: return -EINVAL; } /* if we changed something we need to update flags */ if (flags2 != adapter->flags2) { struct ixgbe_hw *hw = &adapter->hw; u32 mrqc; unsigned int pf_pool = adapter->num_vfs; if ((hw->mac.type >= ixgbe_mac_X550) && (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool)); else mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); if ((flags2 & UDP_RSS_FLAGS) && !(adapter->flags2 & UDP_RSS_FLAGS)) e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); adapter->flags2 = flags2; /* Perform hash on these packet types */ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP | IXGBE_MRQC_RSS_FIELD_IPV6 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP | IXGBE_MRQC_RSS_FIELD_IPV6_UDP); if (flags2 & 
IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; if ((hw->mac.type >= ixgbe_mac_X550) && (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc); else IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); } return 0; } static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) { struct ixgbe_adapter *adapter = netdev_priv(dev); int ret = -EOPNOTSUPP; switch (cmd->cmd) { case ETHTOOL_SRXCLSRLINS: ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd); break; case ETHTOOL_SRXCLSRLDEL: ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd); break; case ETHTOOL_SRXFH: ret = ixgbe_set_rss_hash_opt(adapter, cmd); break; default: break; } return ret; } static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev) { return IXGBE_RSS_KEY_SIZE; } static u32 ixgbe_rss_indir_size(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); return ixgbe_rss_indir_tbl_entries(adapter); } static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir) { int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter); u16 rss_m = adapter->ring_feature[RING_F_RSS].mask; if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) rss_m = adapter->ring_feature[RING_F_RSS].indices - 1; for (i = 0; i < reta_size; i++) indir[i] = adapter->rss_indir_tbl[i] & rss_m; } static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (hfunc) *hfunc = ETH_RSS_HASH_TOP; if (indir) ixgbe_get_reta(adapter, indir); if (key) memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev)); return 0; } static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int i; u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; /* Fill out the redirection table */ if (indir) { int max_queues = min_t(int, adapter->num_rx_queues, ixgbe_rss_indir_tbl_max(adapter)); /*Allow at least 2 queues w/ SR-IOV.*/ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (max_queues < 2)) max_queues = 2; /* Verify user input. 
*/ for (i = 0; i < reta_entries; i++) if (indir[i] >= max_queues) return -EINVAL; for (i = 0; i < reta_entries; i++) adapter->rss_indir_tbl[i] = indir[i]; ixgbe_store_reta(adapter); } /* Fill out the rss hash key */ if (key) { memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev)); ixgbe_store_key(adapter); } return 0; } static int ixgbe_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { struct ixgbe_adapter *adapter = netdev_priv(dev); /* we always support timestamping disabled */ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); break; case ixgbe_mac_X540: case ixgbe_mac_82599EB: info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); break; default: return ethtool_op_get_ts_info(dev, info); } info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (adapter->ptp_clock) info->phc_index = ptp_clock_index(adapter->ptp_clock); else info->phc_index = -1; info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); return 0; } static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) { unsigned int max_combined; u8 tcs = adapter->hw_tcs; if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { /* We only support one q_vector without MSI-X */ max_combined = 1; } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { /* Limit value based on the queue mask */ max_combined = adapter->ring_feature[RING_F_RSS].mask + 1; } else if (tcs > 1) { /* For DCB report channels per traffic class */ if (adapter->hw.mac.type == ixgbe_mac_82598EB) { /* 8 TC w/ 4 queues per TC */ max_combined = 4; } else if (tcs > 4) { /* 8 TC w/ 8 queues per TC */ max_combined = 8; } else { /* 4 TC w/ 16 queues per TC */ max_combined = 16; } } else if (adapter->atr_sample_rate) { /* support up to 64 queues with ATR */ max_combined = IXGBE_MAX_FDIR_INDICES; } else { /* support up to 16 queues with RSS */ max_combined = ixgbe_max_rss_indices(adapter); } return min_t(int, max_combined, num_online_cpus()); } static void ixgbe_get_channels(struct net_device *dev, struct ethtool_channels *ch) { struct ixgbe_adapter *adapter = netdev_priv(dev); /* report maximum channels */ ch->max_combined = ixgbe_max_channels(adapter); /* report info for other vector */ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { ch->max_other = NON_Q_VECTORS; ch->other_count = NON_Q_VECTORS; } /* record RSS queues */ ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; /* nothing else to report if RSS is disabled */ if (ch->combined_count == 1) return; /* we do not support ATR queueing if SR-IOV is enabled */ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) return; /* same thing goes for being DCB enabled */ if (adapter->hw_tcs > 1) return; /* if ATR is disabled we can exit */ if (!adapter->atr_sample_rate) return; /* report flow director queues as maximum channels */ ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices; } static int ixgbe_set_channels(struct net_device *dev, struct ethtool_channels *ch) { struct ixgbe_adapter *adapter = netdev_priv(dev); unsigned int count = ch->combined_count; u8 max_rss_indices = ixgbe_max_rss_indices(adapter); /* verify they are not requesting separate vectors */ if (!count || ch->rx_count || 
ch->tx_count) return -EINVAL; /* verify other_count has not changed */ if (ch->other_count != NON_Q_VECTORS) return -EINVAL; /* verify the number of channels does not exceed hardware limits */ if (count > ixgbe_max_channels(adapter)) return -EINVAL; /* update feature limits from largest to smallest supported values */ adapter->ring_feature[RING_F_FDIR].limit = count; /* cap RSS limit */ if (count > max_rss_indices) count = max_rss_indices; adapter->ring_feature[RING_F_RSS].limit = count; #ifdef IXGBE_FCOE /* cap FCoE limit at 8 */ if (count > IXGBE_FCRETA_SIZE) count = IXGBE_FCRETA_SIZE; adapter->ring_feature[RING_F_FCOE].limit = count; #endif /* use setup TC to update any traffic class queue mapping */ return ixgbe_setup_tc(dev, adapter->hw_tcs); } static int ixgbe_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; s32 status; u8 sff8472_rev, addr_mode; bool page_swap = false; if (hw->phy.type == ixgbe_phy_fw) return -ENXIO; /* Check whether we support SFF-8472 or not */ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_COMP, &sff8472_rev); if (status) return -EIO; /* addressing mode is not supported */ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_SWAP, &addr_mode); if (status) return -EIO; if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); page_swap = true; } if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap || !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) { /* We have a SFP, but it does not support SFF-8472 */ modinfo->type = ETH_MODULE_SFF_8079; modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; } else { /* We have a SFP which supports a revision of SFF-8472. 
*/ modinfo->type = ETH_MODULE_SFF_8472; modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; } return 0; } static int ixgbe_get_module_eeprom(struct net_device *dev, struct ethtool_eeprom *ee, u8 *data) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; s32 status = IXGBE_ERR_PHY_ADDR_INVALID; u8 databyte = 0xFF; int i = 0; if (ee->len == 0) return -EINVAL; if (hw->phy.type == ixgbe_phy_fw) return -ENXIO; for (i = ee->offset; i < ee->offset + ee->len; i++) { /* I2C reads can take long time */ if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) return -EBUSY; if (i < ETH_MODULE_SFF_8079_LEN) status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); else status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); if (status) return -EIO; data[i - ee->offset] = databyte; } return 0; } static const struct { ixgbe_link_speed mac_speed; u32 supported; } ixgbe_ls_map[] = { { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full }, { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full }, { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full }, { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full }, { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full }, }; static const struct { u32 lp_advertised; u32 mac_speed; } ixgbe_lp_map[] = { { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full }, { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full }, { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full }, { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full }, { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full }, { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full}, }; static int ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata) { u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; struct ixgbe_hw *hw = &adapter->hw; s32 rc; u16 i; rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info); if (rc) return rc; edata->lp_advertised = 0; for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) { if (info[0] & ixgbe_lp_map[i].lp_advertised) edata->lp_advertised |= ixgbe_lp_map[i].mac_speed; } edata->supported = 0; for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed) edata->supported |= ixgbe_ls_map[i].supported; } edata->advertised = 0; for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed) edata->advertised |= ixgbe_ls_map[i].supported; } edata->eee_enabled = !!edata->advertised; edata->tx_lpi_enabled = edata->eee_enabled; if (edata->advertised & edata->lp_advertised) edata->eee_active = true; return 0; } static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) return -EOPNOTSUPP; if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw) return ixgbe_get_eee_fw(adapter, edata); return -EOPNOTSUPP; } static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; struct ethtool_eee eee_data; s32 ret_val; if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) return -EOPNOTSUPP; memset(&eee_data, 0, sizeof(struct ethtool_eee)); ret_val = ixgbe_get_eee(netdev, &eee_data); if (ret_val) return ret_val; if (eee_data.eee_enabled && !edata->eee_enabled) { if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { e_err(drv, "Setting EEE tx-lpi is not supported\n"); return -EINVAL; } if 
(eee_data.tx_lpi_timer != edata->tx_lpi_timer) { e_err(drv, "Setting EEE Tx LPI timer is not supported\n"); return -EINVAL; } if (eee_data.advertised != edata->advertised) { e_err(drv, "Setting EEE advertised speeds is not supported\n"); return -EINVAL; } } if (eee_data.eee_enabled != edata->eee_enabled) { if (edata->eee_enabled) { adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; } else { adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; hw->phy.eee_speeds_advertised = 0; } /* reset link */ if (netif_running(netdev)) ixgbe_reinit_locked(adapter); else ixgbe_reset(adapter); } return 0; } static u32 ixgbe_get_priv_flags(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); u32 priv_flags = 0; if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX; if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED) priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN; if (adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) priv_flags |= IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF; return priv_flags; } static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) { struct ixgbe_adapter *adapter = netdev_priv(netdev); unsigned int flags2 = adapter->flags2; unsigned int i; flags2 &= ~IXGBE_FLAG2_RX_LEGACY; if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX) flags2 |= IXGBE_FLAG2_RX_LEGACY; flags2 &= ~IXGBE_FLAG2_VF_IPSEC_ENABLED; if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN) flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED; flags2 &= ~IXGBE_FLAG2_AUTO_DISABLE_VF; if (priv_flags & IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF) { if (adapter->hw.mac.type == ixgbe_mac_82599EB) { /* Reset primary abort counter */ for (i = 0; i < adapter->num_vfs; i++) adapter->vfinfo[i].primary_abort_count = 0; flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF; } else { e_info(probe, "Cannot set private flags: Operation not supported\n"); return -EOPNOTSUPP; } } if (flags2 != adapter->flags2) { adapter->flags2 = flags2; /* reset interface to repopulate queues */ if (netif_running(netdev)) ixgbe_reinit_locked(adapter); } return 0; } static const struct ethtool_ops ixgbe_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .get_drvinfo = ixgbe_get_drvinfo, .get_regs_len = ixgbe_get_regs_len, .get_regs = ixgbe_get_regs, .get_wol = ixgbe_get_wol, .set_wol = ixgbe_set_wol, .nway_reset = ixgbe_nway_reset, .get_link = ethtool_op_get_link, .get_eeprom_len = ixgbe_get_eeprom_len, .get_eeprom = ixgbe_get_eeprom, .set_eeprom = ixgbe_set_eeprom, .get_ringparam = ixgbe_get_ringparam, .set_ringparam = ixgbe_set_ringparam, .get_pause_stats = ixgbe_get_pause_stats, .get_pauseparam = ixgbe_get_pauseparam, .set_pauseparam = ixgbe_set_pauseparam, .get_msglevel = ixgbe_get_msglevel, .set_msglevel = ixgbe_set_msglevel, .self_test = ixgbe_diag_test, .get_strings = ixgbe_get_strings, .set_phys_id = ixgbe_set_phys_id, .get_sset_count = ixgbe_get_sset_count, .get_ethtool_stats = ixgbe_get_ethtool_stats, .get_coalesce = ixgbe_get_coalesce, .set_coalesce = ixgbe_set_coalesce, .get_rxnfc = ixgbe_get_rxnfc, .set_rxnfc = ixgbe_set_rxnfc, .get_rxfh_indir_size = ixgbe_rss_indir_size, .get_rxfh_key_size = ixgbe_get_rxfh_key_size, .get_rxfh = ixgbe_get_rxfh, .set_rxfh = ixgbe_set_rxfh, .get_eee = ixgbe_get_eee, .set_eee = ixgbe_set_eee, .get_channels = ixgbe_get_channels, .set_channels = ixgbe_set_channels, .get_priv_flags = ixgbe_get_priv_flags, .set_priv_flags = ixgbe_set_priv_flags, .get_ts_info = ixgbe_get_ts_info, .get_module_info = ixgbe_get_module_info, .get_module_eeprom = 
ixgbe_get_module_eeprom, .get_link_ksettings = ixgbe_get_link_ksettings, .set_link_ksettings = ixgbe_set_link_ksettings, }; void ixgbe_set_ethtool_ops(struct net_device *netdev) { netdev->ethtool_ops = &ixgbe_ethtool_ops; }
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
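The ETHTOOL_SRXCLSRLINS path handled by ixgbe_set_rxnfc()/ixgbe_add_ethtool_fdir_entry() above is normally driven from userspace through the SIOCETHTOOL ioctl. What follows is a minimal, hypothetical userspace sketch, not part of the driver, showing how a perfect flow-director filter might be installed; the interface name, IP address, port and rule location are placeholders, and the mask convention mirrors what the driver copies out of m_u (an all-ones field mask means the field must match exactly). As the code above notes, only one mask is supported per port, so every installed rule must use the same set of masked fields.

/* Hypothetical userspace example, not driver code.  Build with: cc -o fdir_add fdir_add.c */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	/* Match TCP packets to 192.168.1.10:80 (placeholder values).  m_u is copied
	 * by the driver into its input mask; all-ones means "must match exactly". */
	nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.168.1.10");
	nfc.fs.m_u.tcp_ip4_spec.ip4dst = htonl(0xffffffff);
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);
	nfc.fs.ring_cookie = 0;		/* steer to RX queue 0; RX_CLS_FLOW_DISC would drop instead */
	nfc.fs.location = 10;		/* becomes input->sw_idx in ixgbe_add_ethtool_fdir_entry() */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder interface name */
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");

	close(fd);
	return 0;
}

Roughly the same rule can be installed with the ethtool utility, e.g. "ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.10 dst-port 80 action 0 loc 10".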
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/debugfs.h> #include <linux/module.h> #include "ixgbe.h" static struct dentry *ixgbe_dbg_root; static char ixgbe_dbg_reg_ops_buf[256] = ""; static ssize_t ixgbe_dbg_common_ops_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos, char *dbg_buf) { struct ixgbe_adapter *adapter = filp->private_data; char *buf; int len; /* don't allow partial reads */ if (*ppos != 0) return 0; buf = kasprintf(GFP_KERNEL, "%s: %s\n", adapter->netdev->name, dbg_buf); if (!buf) return -ENOMEM; if (count < strlen(buf)) { kfree(buf); return -ENOSPC; } len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); kfree(buf); return len; } /** * ixgbe_dbg_reg_ops_read - read for reg_ops datum * @filp: the opened file * @buffer: where to write the data for the user to read * @count: the size of the user's buffer * @ppos: file position offset **/ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { return ixgbe_dbg_common_ops_read(filp, buffer, count, ppos, ixgbe_dbg_reg_ops_buf); } /** * ixgbe_dbg_reg_ops_write - write into reg_ops datum * @filp: the opened file * @buffer: where to find the user's data * @count: the length of the user's data * @ppos: file position offset **/ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { struct ixgbe_adapter *adapter = filp->private_data; int len; /* don't allow partial writes */ if (*ppos != 0) return 0; if (count >= sizeof(ixgbe_dbg_reg_ops_buf)) return -ENOSPC; len = simple_write_to_buffer(ixgbe_dbg_reg_ops_buf, sizeof(ixgbe_dbg_reg_ops_buf)-1, ppos, buffer, count); if (len < 0) return len; ixgbe_dbg_reg_ops_buf[len] = '\0'; if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) { u32 reg, value; int cnt; cnt = sscanf(&ixgbe_dbg_reg_ops_buf[5], "%x %x", &reg, &value); if (cnt == 2) { IXGBE_WRITE_REG(&adapter->hw, reg, value); value = IXGBE_READ_REG(&adapter->hw, reg); e_dev_info("write: 0x%08x = 0x%08x\n", reg, value); } else { e_dev_info("write <reg> <value>\n"); } } else if (strncmp(ixgbe_dbg_reg_ops_buf, "read", 4) == 0) { u32 reg, value; int cnt; cnt = sscanf(&ixgbe_dbg_reg_ops_buf[4], "%x", &reg); if (cnt == 1) { value = IXGBE_READ_REG(&adapter->hw, reg); e_dev_info("read 0x%08x = 0x%08x\n", reg, value); } else { e_dev_info("read <reg>\n"); } } else { e_dev_info("Unknown command %s\n", ixgbe_dbg_reg_ops_buf); e_dev_info("Available commands:\n"); e_dev_info(" read <reg>\n"); e_dev_info(" write <reg> <value>\n"); } return count; } static const struct file_operations ixgbe_dbg_reg_ops_fops = { .owner = THIS_MODULE, .open = simple_open, .read = ixgbe_dbg_reg_ops_read, .write = ixgbe_dbg_reg_ops_write, }; static char ixgbe_dbg_netdev_ops_buf[256] = ""; /** * ixgbe_dbg_netdev_ops_read - read for netdev_ops datum * @filp: the opened file * @buffer: where to write the data for the user to read * @count: the size of the user's buffer * @ppos: file position offset **/ static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { return ixgbe_dbg_common_ops_read(filp, buffer, count, ppos, ixgbe_dbg_netdev_ops_buf); } /** * ixgbe_dbg_netdev_ops_write - write into netdev_ops datum * @filp: the opened file * @buffer: where to find the user's data * @count: the length of the user's data * @ppos: file position offset **/ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, const char __user 
*buffer, size_t count, loff_t *ppos) { struct ixgbe_adapter *adapter = filp->private_data; int len; /* don't allow partial writes */ if (*ppos != 0) return 0; if (count >= sizeof(ixgbe_dbg_netdev_ops_buf)) return -ENOSPC; len = simple_write_to_buffer(ixgbe_dbg_netdev_ops_buf, sizeof(ixgbe_dbg_netdev_ops_buf)-1, ppos, buffer, count); if (len < 0) return len; ixgbe_dbg_netdev_ops_buf[len] = '\0'; if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { /* TX Queue number below is wrong, but ixgbe does not use it */ adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev, UINT_MAX); e_dev_info("tx_timeout called\n"); } else { e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf); e_dev_info("Available commands:\n"); e_dev_info(" tx_timeout\n"); } return count; } static const struct file_operations ixgbe_dbg_netdev_ops_fops = { .owner = THIS_MODULE, .open = simple_open, .read = ixgbe_dbg_netdev_ops_read, .write = ixgbe_dbg_netdev_ops_write, }; /** * ixgbe_dbg_adapter_init - setup the debugfs directory for the adapter * @adapter: the adapter that is starting up **/ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) { const char *name = pci_name(adapter->pdev); adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root); debugfs_create_file("reg_ops", 0600, adapter->ixgbe_dbg_adapter, adapter, &ixgbe_dbg_reg_ops_fops); debugfs_create_file("netdev_ops", 0600, adapter->ixgbe_dbg_adapter, adapter, &ixgbe_dbg_netdev_ops_fops); } /** * ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries * @adapter: the adapter that is exiting **/ void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) { debugfs_remove_recursive(adapter->ixgbe_dbg_adapter); adapter->ixgbe_dbg_adapter = NULL; } /** * ixgbe_dbg_init - start up debugfs for the driver **/ void ixgbe_dbg_init(void) { ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL); } /** * ixgbe_dbg_exit - clean out the driver's debugfs entries **/ void ixgbe_dbg_exit(void) { debugfs_remove_recursive(ixgbe_dbg_root); }
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
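The reg_ops debugfs file created above accepts the plain-text commands "read <reg>" and "write <reg> <value>"; the register value is reported through e_dev_info() (i.e. in the kernel log), and reading the file back merely echoes the last command prefixed with the netdev name. Below is a minimal, hypothetical userspace sketch; the PCI address and the register offset are placeholders, the per-adapter directory name comes from pci_name(pdev) under the driver's debugfs root, and debugfs is assumed to be mounted at /sys/kernel/debug.

/* Hypothetical userspace example, not driver code. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* <debugfs mount>/<ixgbe_driver_name>/<pci_name(pdev)>/reg_ops; the PCI
	 * address below is a placeholder for a real adapter. */
	const char *path = "/sys/kernel/debug/ixgbe/0000:03:00.0/reg_ops";
	const char *cmd = "read 0x08600";	/* arbitrary example register offset */
	char buf[256];
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");

	/* ixgbe_dbg_reg_ops_read() refuses partial reads, so rewind before reading;
	 * the file echoes "<netdev name>: <last command>", while the register value
	 * itself is printed to the kernel log via e_dev_info(). */
	lseek(fd, 0, SEEK_SET);
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s", buf);
	}

	close(fd);
	return 0;
}

The companion netdev_ops file works the same way and currently understands a single command, "tx_timeout".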
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> #include <linux/iopoll.h> #include <linux/sched.h> #include "ixgbe.h" #include "ixgbe_phy.h" static void ixgbe_i2c_start(struct ixgbe_hw *hw); static void ixgbe_i2c_stop(struct ixgbe_hw *hw); static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); /** * ixgbe_out_i2c_byte_ack - Send I2C byte with ack * @hw: pointer to the hardware structure * @byte: byte to send * * Returns an error code on error. **/ static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) { s32 status; status = ixgbe_clock_out_i2c_byte(hw, byte); if (status) return status; return ixgbe_get_i2c_ack(hw); } /** * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack * @hw: pointer to the hardware structure * @byte: pointer to a u8 to receive the byte * * Returns an error code on error. **/ static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) { s32 status; status = ixgbe_clock_in_i2c_byte(hw, byte); if (status) return status; /* ACK */ return ixgbe_clock_out_i2c_bit(hw, false); } /** * ixgbe_ones_comp_byte_add - Perform one's complement addition * @add1: addend 1 * @add2: addend 2 * * Returns one's complement 8-bit sum. **/ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) { u16 sum = add1 + add2; sum = (sum & 0xFF) + (sum >> 8); return sum & 0xFF; } /** * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation * @hw: pointer to the hardware structure * @addr: I2C bus address to read from * @reg: I2C device register to read from * @val: pointer to location to receive read value * @lock: true if to take and release semaphore * * Returns an error code on error. 
*/ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; int max_retry = 3; int retry = 0; u8 csum_byte; u8 high_bits; u8 low_bits; u8 reg_high; u8 csum; reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); csum = ~csum; do { if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; ixgbe_i2c_start(hw); /* Device Address and write indication */ if (ixgbe_out_i2c_byte_ack(hw, addr)) goto fail; /* Write bits 14:8 */ if (ixgbe_out_i2c_byte_ack(hw, reg_high)) goto fail; /* Write bits 7:0 */ if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) goto fail; /* Write csum */ if (ixgbe_out_i2c_byte_ack(hw, csum)) goto fail; /* Re-start condition */ ixgbe_i2c_start(hw); /* Device Address and read indication */ if (ixgbe_out_i2c_byte_ack(hw, addr | 1)) goto fail; /* Get upper bits */ if (ixgbe_in_i2c_byte_ack(hw, &high_bits)) goto fail; /* Get low bits */ if (ixgbe_in_i2c_byte_ack(hw, &low_bits)) goto fail; /* Get csum */ if (ixgbe_clock_in_i2c_byte(hw, &csum_byte)) goto fail; /* NACK */ if (ixgbe_clock_out_i2c_bit(hw, false)) goto fail; ixgbe_i2c_stop(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); *val = (high_bits << 8) | low_bits; return 0; fail: ixgbe_i2c_bus_clear(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); retry++; if (retry < max_retry) hw_dbg(hw, "I2C byte read combined error - Retry.\n"); else hw_dbg(hw, "I2C byte read combined error.\n"); } while (retry < max_retry); return IXGBE_ERR_I2C; } /** * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation * @hw: pointer to the hardware structure * @addr: I2C bus address to write to * @reg: I2C device register to write to * @val: value to write * @lock: true if to take and release semaphore * * Returns an error code on error. 
*/ s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; int max_retry = 1; int retry = 0; u8 reg_high; u8 csum; reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); csum = ixgbe_ones_comp_byte_add(csum, val >> 8); csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF); csum = ~csum; do { if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; ixgbe_i2c_start(hw); /* Device Address and write indication */ if (ixgbe_out_i2c_byte_ack(hw, addr)) goto fail; /* Write bits 14:8 */ if (ixgbe_out_i2c_byte_ack(hw, reg_high)) goto fail; /* Write bits 7:0 */ if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) goto fail; /* Write data 15:8 */ if (ixgbe_out_i2c_byte_ack(hw, val >> 8)) goto fail; /* Write data 7:0 */ if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF)) goto fail; /* Write csum */ if (ixgbe_out_i2c_byte_ack(hw, csum)) goto fail; ixgbe_i2c_stop(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); return 0; fail: ixgbe_i2c_bus_clear(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); retry++; if (retry < max_retry) hw_dbg(hw, "I2C byte write combined error - Retry.\n"); else hw_dbg(hw, "I2C byte write combined error.\n"); } while (retry < max_retry); return IXGBE_ERR_I2C; } /** * ixgbe_probe_phy - Probe a single address for a PHY * @hw: pointer to hardware structure * @phy_addr: PHY address to probe * * Returns true if PHY found **/ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr) { u16 ext_ability = 0; hw->phy.mdio.prtad = phy_addr; if (mdio45_probe(&hw->phy.mdio, phy_addr) != 0) return false; if (ixgbe_get_phy_id(hw)) return false; hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); if (hw->phy.type == ixgbe_phy_unknown) { hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, &ext_ability); if (ext_ability & (MDIO_PMA_EXTABLE_10GBT | MDIO_PMA_EXTABLE_1000BT)) hw->phy.type = ixgbe_phy_cu_unknown; else hw->phy.type = ixgbe_phy_generic; } return true; } /** * ixgbe_identify_phy_generic - Get physical layer module * @hw: pointer to hardware structure * * Determines the physical layer module found on the current adapter. **/ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) { u32 phy_addr; u32 status = IXGBE_ERR_PHY_ADDR_INVALID; if (!hw->phy.phy_semaphore_mask) { if (hw->bus.lan_id) hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; else hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; } if (hw->phy.type != ixgbe_phy_unknown) return 0; if (hw->phy.nw_mng_if_sel) { phy_addr = (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; if (ixgbe_probe_phy(hw, phy_addr)) return 0; else return IXGBE_ERR_PHY_ADDR_INVALID; } for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { if (ixgbe_probe_phy(hw, phy_addr)) { status = 0; break; } } /* Certain media types do not have a phy so an address will not * be found and the code will take this path. Caller has to * decide if it is an error or not. */ if (status) hw->phy.mdio.prtad = MDIO_PRTAD_NONE; return status; } /** * ixgbe_check_reset_blocked - check status of MNG FW veto bit * @hw: pointer to the hardware structure * * This function checks the MMNGC.MNG_VETO bit to see if there are * any constraints on link from manageability. For MAC's that don't * have this bit just return false since the link can not be blocked * via this method. 
**/ bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw) { u32 mmngc; /* If we don't have this bit, it can't be blocking */ if (hw->mac.type == ixgbe_mac_82598EB) return false; mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC); if (mmngc & IXGBE_MMNGC_MNG_VETO) { hw_dbg(hw, "MNG_VETO bit detected.\n"); return true; } return false; } /** * ixgbe_get_phy_id - Get the phy type * @hw: pointer to hardware structure * **/ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) { s32 status; u16 phy_id_high = 0; u16 phy_id_low = 0; status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, &phy_id_high); if (!status) { hw->phy.id = (u32)(phy_id_high << 16); status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD, &phy_id_low); hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); } return status; } /** * ixgbe_get_phy_type_from_id - Get the phy type * @phy_id: hardware phy id * **/ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) { enum ixgbe_phy_type phy_type; switch (phy_id) { case TN1010_PHY_ID: phy_type = ixgbe_phy_tn; break; case X550_PHY_ID2: case X550_PHY_ID3: case X540_PHY_ID: phy_type = ixgbe_phy_aq; break; case QT2022_PHY_ID: phy_type = ixgbe_phy_qt; break; case ATH_PHY_ID: phy_type = ixgbe_phy_nl; break; case X557_PHY_ID: case X557_PHY_ID2: phy_type = ixgbe_phy_x550em_ext_t; break; case BCM54616S_E_PHY_ID: phy_type = ixgbe_phy_ext_1g_t; break; default: phy_type = ixgbe_phy_unknown; break; } return phy_type; } /** * ixgbe_reset_phy_generic - Performs a PHY reset * @hw: pointer to hardware structure **/ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) { u32 i; u16 ctrl = 0; s32 status = 0; if (hw->phy.type == ixgbe_phy_unknown) status = ixgbe_identify_phy_generic(hw); if (status != 0 || hw->phy.type == ixgbe_phy_none) return status; /* Don't reset PHY if it's shut down due to overtemp. */ if (!hw->phy.reset_if_overtemp && (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) return 0; /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) return 0; /* * Perform soft PHY reset to the PHY_XS. * This will cause a soft reset to the PHY */ hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, MDIO_CTRL1_RESET); /* * Poll for reset bit to self-clear indicating reset is complete. * Some PHYs could take up to 3 seconds to complete and need about * 1.7 usec delay after the reset is complete. 
*/ for (i = 0; i < 30; i++) { msleep(100); if (hw->phy.type == ixgbe_phy_x550em_ext_t) { status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_TX_VENDOR_ALARMS_3, MDIO_MMD_PMAPMD, &ctrl); if (status) return status; if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { udelay(2); break; } } else { status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &ctrl); if (status) return status; if (!(ctrl & MDIO_CTRL1_RESET)) { udelay(2); break; } } } if (ctrl & MDIO_CTRL1_RESET) { hw_dbg(hw, "PHY reset polling failed to complete.\n"); return IXGBE_ERR_RESET_FAILED; } return 0; } /** * ixgbe_read_phy_reg_mdi - read PHY register * @hw: pointer to hardware structure * @reg_addr: 32 bit address of PHY register to read * @device_type: 5 bit device type * @phy_data: Pointer to read data from PHY register * * Reads a value from a specified PHY register without the SWFW lock **/ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { u32 i, data, command; /* Setup and write the address cycle command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); /* Check every 10 usec to see if the address cycle completed. * The MDI Command bit will clear when the operation is * complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { udelay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) break; } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { hw_dbg(hw, "PHY address command did not complete.\n"); return IXGBE_ERR_PHY; } /* Address cycle complete, setup and write the read * command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); /* Check every 10 usec to see if the address cycle * completed. The MDI Command bit will clear when the * operation is complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { udelay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) break; } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { hw_dbg(hw, "PHY read command didn't complete\n"); return IXGBE_ERR_PHY; } /* Read operation is complete. 
Get the data * from MSRWD */ data = IXGBE_READ_REG(hw, IXGBE_MSRWD); data >>= IXGBE_MSRWD_READ_DATA_SHIFT; *phy_data = (u16)(data); return 0; } /** * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register * using the SWFW lock - this function is needed in most cases * @hw: pointer to hardware structure * @reg_addr: 32 bit address of PHY register to read * @device_type: 5 bit device type * @phy_data: Pointer to read data from PHY register **/ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { s32 status; u32 gssr = hw->phy.phy_semaphore_mask; if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type, phy_data); hw->mac.ops.release_swfw_sync(hw, gssr); } else { return IXGBE_ERR_SWFW_SYNC; } return status; } /** * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register * without SWFW lock * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: 5 bit device type * @phy_data: Data to write to the PHY register **/ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { u32 i, command; /* Put the data in the MDI single read and write data register*/ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); /* Setup and write the address cycle command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); /* * Check every 10 usec to see if the address cycle completed. * The MDI Command bit will clear when the operation is * complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { udelay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) break; } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { hw_dbg(hw, "PHY address cmd didn't complete\n"); return IXGBE_ERR_PHY; } /* * Address cycle complete, setup and write the write * command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); /* Check every 10 usec to see if the address cycle * completed. 
The MDI Command bit will clear when the * operation is complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { udelay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) break; } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { hw_dbg(hw, "PHY write cmd didn't complete\n"); return IXGBE_ERR_PHY; } return 0; } /** * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register * using SWFW lock- this function is needed in most cases * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: 5 bit device type * @phy_data: Data to write to the PHY register **/ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { s32 status; u32 gssr = hw->phy.phy_semaphore_mask; if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data); hw->mac.ops.release_swfw_sync(hw, gssr); } else { return IXGBE_ERR_SWFW_SYNC; } return status; } #define IXGBE_HW_READ_REG(addr) IXGBE_READ_REG(hw, addr) /** * ixgbe_msca_cmd - Write the command register and poll for completion/timeout * @hw: pointer to hardware structure * @cmd: command register value to write **/ static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd) { IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd); return readx_poll_timeout(IXGBE_HW_READ_REG, IXGBE_MSCA, cmd, !(cmd & IXGBE_MSCA_MDI_COMMAND), 10, 10 * IXGBE_MDIO_COMMAND_TIMEOUT); } /** * ixgbe_mii_bus_read_generic_c22 - Read a clause 22 register with gssr flags * @hw: pointer to hardware structure * @addr: address * @regnum: register number * @gssr: semaphore flags to acquire **/ static s32 ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr, int regnum, u32 gssr) { u32 hwaddr, cmd; s32 data; if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) return -EBUSY; hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT; hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT; cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND; data = ixgbe_msca_cmd(hw, cmd); if (data < 0) goto mii_bus_read_done; data = IXGBE_READ_REG(hw, IXGBE_MSRWD); data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0); mii_bus_read_done: hw->mac.ops.release_swfw_sync(hw, gssr); return data; } /** * ixgbe_mii_bus_read_generic_c45 - Read a clause 45 register with gssr flags * @hw: pointer to hardware structure * @addr: address * @devad: device address to read * @regnum: register number * @gssr: semaphore flags to acquire **/ static s32 ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr, int devad, int regnum, u32 gssr) { u32 hwaddr, cmd; s32 data; if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) return -EBUSY; hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT; hwaddr |= devad << 16 | regnum; cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND; data = ixgbe_msca_cmd(hw, cmd); if (data < 0) goto mii_bus_read_done; cmd = hwaddr | IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND; data = ixgbe_msca_cmd(hw, cmd); if (data < 0) goto mii_bus_read_done; data = IXGBE_READ_REG(hw, IXGBE_MSRWD); data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0); mii_bus_read_done: hw->mac.ops.release_swfw_sync(hw, gssr); return data; } /** * ixgbe_mii_bus_write_generic_c22 - Write a clause 22 register with gssr flags * @hw: pointer to hardware structure * @addr: address * @regnum: register number * @val: value to write * @gssr: semaphore flags to acquire **/ static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr, 
int regnum, u16 val, u32 gssr) { u32 hwaddr, cmd; s32 err; if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) return -EBUSY; IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val); hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT; hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT; cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND; err = ixgbe_msca_cmd(hw, cmd); hw->mac.ops.release_swfw_sync(hw, gssr); return err; } /** * ixgbe_mii_bus_write_generic_c45 - Write a clause 45 register with gssr flags * @hw: pointer to hardware structure * @addr: address * @devad: device address to read * @regnum: register number * @val: value to write * @gssr: semaphore flags to acquire **/ static s32 ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr, int devad, int regnum, u16 val, u32 gssr) { u32 hwaddr, cmd; s32 err; if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) return -EBUSY; IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val); hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT; hwaddr |= devad << 16 | regnum; cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND; err = ixgbe_msca_cmd(hw, cmd); if (err < 0) goto mii_bus_write_done; cmd = hwaddr | IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND; err = ixgbe_msca_cmd(hw, cmd); mii_bus_write_done: hw->mac.ops.release_swfw_sync(hw, gssr); return err; } /** * ixgbe_mii_bus_read_c22 - Read a clause 22 register * @bus: pointer to mii_bus structure which points to our driver private * @addr: address * @regnum: register number **/ static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum) { struct ixgbe_adapter *adapter = bus->priv; struct ixgbe_hw *hw = &adapter->hw; u32 gssr = hw->phy.phy_semaphore_mask; return ixgbe_mii_bus_read_generic_c22(hw, addr, regnum, gssr); } /** * ixgbe_mii_bus_read_c45 - Read a clause 45 register * @bus: pointer to mii_bus structure which points to our driver private * @devad: device address to read * @addr: address * @regnum: register number **/ static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr, int regnum) { struct ixgbe_adapter *adapter = bus->priv; struct ixgbe_hw *hw = &adapter->hw; u32 gssr = hw->phy.phy_semaphore_mask; return ixgbe_mii_bus_read_generic_c45(hw, addr, devad, regnum, gssr); } /** * ixgbe_mii_bus_write_c22 - Write a clause 22 register * @bus: pointer to mii_bus structure which points to our driver private * @addr: address * @regnum: register number * @val: value to write **/ static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum, u16 val) { struct ixgbe_adapter *adapter = bus->priv; struct ixgbe_hw *hw = &adapter->hw; u32 gssr = hw->phy.phy_semaphore_mask; return ixgbe_mii_bus_write_generic_c22(hw, addr, regnum, val, gssr); } /** * ixgbe_mii_bus_write_c45 - Write a clause 45 register * @bus: pointer to mii_bus structure which points to our driver private * @addr: address * @devad: device address to read * @regnum: register number * @val: value to write **/ static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad, int regnum, u16 val) { struct ixgbe_adapter *adapter = bus->priv; struct ixgbe_hw *hw = &adapter->hw; u32 gssr = hw->phy.phy_semaphore_mask; return ixgbe_mii_bus_write_generic_c45(hw, addr, devad, regnum, val, gssr); } /** * ixgbe_x550em_a_mii_bus_read_c22 - Read a clause 22 register on x550em_a * @bus: pointer to mii_bus structure which points to our driver private * @addr: address * @regnum: register number **/ static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum) { struct 
ixgbe_adapter *adapter = bus->priv; struct ixgbe_hw *hw = &adapter->hw; u32 gssr = hw->phy.phy_semaphore_mask; gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM; return ixgbe_mii_bus_read_generic_c22(hw, addr, regnum, gssr); } /** * ixgbe_x550em_a_mii_bus_read_c45 - Read a clause 45 register on x550em_a * @bus: pointer to mii_bus structure which points to our driver private * @addr: address * @devad: device address to read * @regnum: register number **/ static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr, int devad, int regnum) { struct ixgbe_adapter *adapter = bus->priv; struct ixgbe_hw *hw = &adapter->hw; u32 gssr = hw->phy.phy_semaphore_mask; gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM; return ixgbe_mii_bus_read_generic_c45(hw, addr, devad, regnum, gssr); } /** * ixgbe_x550em_a_mii_bus_write_c22 - Write a clause 22 register on x550em_a * @bus: pointer to mii_bus structure which points to our driver private * @addr: address * @regnum: register number * @val: value to write **/ static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum, u16 val) { struct ixgbe_adapter *adapter = bus->priv; struct ixgbe_hw *hw = &adapter->hw; u32 gssr = hw->phy.phy_semaphore_mask; gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM; return ixgbe_mii_bus_write_generic_c22(hw, addr, regnum, val, gssr); } /** * ixgbe_x550em_a_mii_bus_write_c45 - Write a clause 45 register on x550em_a * @bus: pointer to mii_bus structure which points to our driver private * @addr: address * @devad: device address to read * @regnum: register number * @val: value to write **/ static s32 ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad, int regnum, u16 val) { struct ixgbe_adapter *adapter = bus->priv; struct ixgbe_hw *hw = &adapter->hw; u32 gssr = hw->phy.phy_semaphore_mask; gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM; return ixgbe_mii_bus_write_generic_c45(hw, addr, devad, regnum, val, gssr); } /** * ixgbe_get_first_secondary_devfn - get first device downstream of root port * @devfn: PCI_DEVFN of root port on domain 0, bus 0 * * Returns pci_dev pointer to PCI_DEVFN(0, 0) on subordinate side of root * on domain 0, bus 0, devfn = 'devfn' **/ static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn) { struct pci_dev *rp_pdev; int bus; rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn); if (rp_pdev && rp_pdev->subordinate) { bus = rp_pdev->subordinate->number; pci_dev_put(rp_pdev); return pci_get_domain_bus_and_slot(0, bus, 0); } pci_dev_put(rp_pdev); return NULL; } /** * ixgbe_x550em_a_has_mii - is this the first ixgbe x550em_a PCI function? * @hw: pointer to hardware structure * * Returns true if hw points to lowest numbered PCI B:D.F x550_em_a device in * the SoC. There are up to 4 MACs sharing a single MDIO bus on the x550em_a, * but we only want to register one MDIO bus. 
**/ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw) { struct ixgbe_adapter *adapter = hw->back; struct pci_dev *pdev = adapter->pdev; struct pci_dev *func0_pdev; bool has_mii = false; /* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices * are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0 * It's not valid for function 0 to be disabled and function 1 is up, * so the lowest numbered ixgbe dev will be device 0 function 0 on one * of those two root ports */ func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0)); if (func0_pdev) { if (func0_pdev == pdev) has_mii = true; goto out; } func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0)); if (func0_pdev == pdev) has_mii = true; out: pci_dev_put(func0_pdev); return has_mii; } /** * ixgbe_mii_bus_init - mii_bus structure setup * @hw: pointer to hardware structure * * Returns 0 on success, negative on failure * * ixgbe_mii_bus_init initializes a mii_bus structure in adapter **/ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) { s32 (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val); s32 (*read_c22)(struct mii_bus *bus, int addr, int regnum); s32 (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum, u16 val); s32 (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum); struct ixgbe_adapter *adapter = hw->back; struct pci_dev *pdev = adapter->pdev; struct device *dev = &adapter->netdev->dev; struct mii_bus *bus; switch (hw->device_id) { /* C3000 SoCs */ case IXGBE_DEV_ID_X550EM_A_KR: case IXGBE_DEV_ID_X550EM_A_KR_L: case IXGBE_DEV_ID_X550EM_A_SFP_N: case IXGBE_DEV_ID_X550EM_A_SGMII: case IXGBE_DEV_ID_X550EM_A_SGMII_L: case IXGBE_DEV_ID_X550EM_A_10G_T: case IXGBE_DEV_ID_X550EM_A_SFP: case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: if (!ixgbe_x550em_a_has_mii(hw)) return 0; read_c22 = ixgbe_x550em_a_mii_bus_read_c22; write_c22 = ixgbe_x550em_a_mii_bus_write_c22; read_c45 = ixgbe_x550em_a_mii_bus_read_c45; write_c45 = ixgbe_x550em_a_mii_bus_write_c45; break; default: read_c22 = ixgbe_mii_bus_read_c22; write_c22 = ixgbe_mii_bus_write_c22; read_c45 = ixgbe_mii_bus_read_c45; write_c45 = ixgbe_mii_bus_write_c45; break; } bus = devm_mdiobus_alloc(dev); if (!bus) return -ENOMEM; bus->read = read_c22; bus->write = write_c22; bus->read_c45 = read_c45; bus->write_c45 = write_c45; /* Use the position of the device in the PCI hierarchy as the id */ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name, pci_name(pdev)); bus->name = "ixgbe-mdio"; bus->priv = adapter; bus->parent = dev; bus->phy_mask = GENMASK(31, 0); /* Support clause 22/45 natively. ixgbe_probe() sets MDIO_EMULATE_C22 * unfortunately that causes some clause 22 frames to be sent with * clause 45 addressing. We don't want that. */ hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22; adapter->mii_bus = bus; return mdiobus_register(bus); } /** * ixgbe_setup_phy_link_generic - Set and restart autoneg * @hw: pointer to hardware structure * * Restart autonegotiation and PHY and waits for completion. 
**/ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) { s32 status = 0; u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; bool autoneg = false; ixgbe_link_speed speed; ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); /* Set or unset auto-negotiation 10G advertisement */ hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg); autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) && (speed & IXGBE_LINK_SPEED_10GB_FULL)) autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg); hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, MDIO_MMD_AN, &autoneg_reg); if (hw->mac.type == ixgbe_mac_X550) { /* Set or unset auto-negotiation 5G advertisement */ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE; if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) && (speed & IXGBE_LINK_SPEED_5GB_FULL)) autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE; /* Set or unset auto-negotiation 2.5G advertisement */ autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE; if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) && (speed & IXGBE_LINK_SPEED_2_5GB_FULL)) autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE; } /* Set or unset auto-negotiation 1G advertisement */ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) && (speed & IXGBE_LINK_SPEED_1GB_FULL)) autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, MDIO_MMD_AN, autoneg_reg); /* Set or unset auto-negotiation 100M advertisement */ hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg); autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF); if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) && (speed & IXGBE_LINK_SPEED_100_FULL)) autoneg_reg |= ADVERTISE_100FULL; hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg); /* Blocked by MNG FW so don't reset PHY */ if (ixgbe_check_reset_blocked(hw)) return 0; /* Restart PHY autonegotiation and wait for completion */ hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, &autoneg_reg); autoneg_reg |= MDIO_AN_CTRL1_RESTART; hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg); return status; } /** * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities * @hw: pointer to hardware structure * @speed: new link speed * @autoneg_wait_to_complete: unused **/ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { /* Clear autoneg_advertised and set new values based on input link * speed. 
*/ hw->phy.autoneg_advertised = 0; if (speed & IXGBE_LINK_SPEED_10GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; if (speed & IXGBE_LINK_SPEED_5GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL; if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; if (speed & IXGBE_LINK_SPEED_1GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; if (speed & IXGBE_LINK_SPEED_100_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; if (speed & IXGBE_LINK_SPEED_10_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL; /* Setup link based on the new speed settings */ if (hw->phy.ops.setup_link) hw->phy.ops.setup_link(hw); return 0; } /** * ixgbe_get_copper_speeds_supported - Get copper link speed from phy * @hw: pointer to hardware structure * * Determines the supported link capabilities by reading the PHY auto * negotiation register. */ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) { u16 speed_ability; s32 status; status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, &speed_ability); if (status) return status; if (speed_ability & MDIO_SPEED_10G) hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL; if (speed_ability & MDIO_PMA_SPEED_1000) hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL; if (speed_ability & MDIO_PMA_SPEED_100) hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; switch (hw->mac.type) { case ixgbe_mac_X550: hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL; hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; break; case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; break; default: break; } return 0; } /** * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed * @autoneg: boolean auto-negotiation value */ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { s32 status = 0; *autoneg = true; if (!hw->phy.speeds_supported) status = ixgbe_get_copper_speeds_supported(hw); *speed = hw->phy.speeds_supported; return status; } /** * ixgbe_check_phy_link_tnx - Determine link and speed status * @hw: pointer to hardware structure * @speed: link speed * @link_up: status of link * * Reads the VS1 register to determine if link is up and the current speed for * the PHY. **/ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up) { s32 status; u32 time_out; u32 max_time_out = 10; u16 phy_link = 0; u16 phy_speed = 0; u16 phy_data = 0; /* Initialize speed and link to default case */ *link_up = false; *speed = IXGBE_LINK_SPEED_10GB_FULL; /* * Check current speed and link status of the PHY register. * This is a vendor specific register and may have to * be changed for other copper PHYs. */ for (time_out = 0; time_out < max_time_out; time_out++) { udelay(10); status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_VEND1, &phy_data); phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; phy_speed = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { *link_up = true; if (phy_speed == IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) *speed = IXGBE_LINK_SPEED_1GB_FULL; break; } } return status; } /** * ixgbe_setup_phy_link_tnx - Set and restart autoneg * @hw: pointer to hardware structure * * Restart autonegotiation and PHY and waits for completion. 
* This function always returns success, this is nessary since * it is called via a function pointer that could call other * functions that could return an error. **/ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) { u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; bool autoneg = false; ixgbe_link_speed speed; ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); if (speed & IXGBE_LINK_SPEED_10GB_FULL) { /* Set or unset auto-negotiation 10G advertisement */ hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg); autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg); } if (speed & IXGBE_LINK_SPEED_1GB_FULL) { /* Set or unset auto-negotiation 1G advertisement */ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, MDIO_MMD_AN, &autoneg_reg); autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, MDIO_MMD_AN, autoneg_reg); } if (speed & IXGBE_LINK_SPEED_100_FULL) { /* Set or unset auto-negotiation 100M advertisement */ hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg); autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF); if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) autoneg_reg |= ADVERTISE_100FULL; hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg); } /* Blocked by MNG FW so don't reset PHY */ if (ixgbe_check_reset_blocked(hw)) return 0; /* Restart PHY autonegotiation and wait for completion */ hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, &autoneg_reg); autoneg_reg |= MDIO_AN_CTRL1_RESTART; hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg); return 0; } /** * ixgbe_reset_phy_nl - Performs a PHY reset * @hw: pointer to hardware structure **/ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) { u16 phy_offset, control, eword, edata, block_crc; bool end_data = false; u16 list_offset, data_offset; u16 phy_data = 0; s32 ret_val; u32 i; /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) return 0; hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data); /* reset the PHY and poll for completion */ hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, (phy_data | MDIO_CTRL1_RESET)); for (i = 0; i < 100; i++) { hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data); if ((phy_data & MDIO_CTRL1_RESET) == 0) break; usleep_range(10000, 20000); } if ((phy_data & MDIO_CTRL1_RESET) != 0) { hw_dbg(hw, "PHY reset did not complete.\n"); return IXGBE_ERR_PHY; } /* Get init offsets */ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, &data_offset); if (ret_val) return ret_val; ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); data_offset++; while (!end_data) { /* * Read control word from PHY init contents offset */ ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); if (ret_val) goto err_eeprom; control = (eword & IXGBE_CONTROL_MASK_NL) >> IXGBE_CONTROL_SHIFT_NL; edata = eword & IXGBE_DATA_MASK_NL; switch (control) { case IXGBE_DELAY_NL: data_offset++; hw_dbg(hw, "DELAY: %d MS\n", edata); usleep_range(edata * 1000, edata * 2000); break; case IXGBE_DATA_NL: hw_dbg(hw, "DATA:\n"); data_offset++; ret_val = hw->eeprom.ops.read(hw, data_offset++, &phy_offset); if (ret_val) goto err_eeprom; for (i = 0; i < edata; i++) { ret_val = 
hw->eeprom.ops.read(hw, data_offset, &eword); if (ret_val) goto err_eeprom; hw->phy.ops.write_reg(hw, phy_offset, MDIO_MMD_PMAPMD, eword); hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, phy_offset); data_offset++; phy_offset++; } break; case IXGBE_CONTROL_NL: data_offset++; hw_dbg(hw, "CONTROL:\n"); if (edata == IXGBE_CONTROL_EOL_NL) { hw_dbg(hw, "EOL\n"); end_data = true; } else if (edata == IXGBE_CONTROL_SOL_NL) { hw_dbg(hw, "SOL\n"); } else { hw_dbg(hw, "Bad control value\n"); return IXGBE_ERR_PHY; } break; default: hw_dbg(hw, "Bad control type\n"); return IXGBE_ERR_PHY; } } return ret_val; err_eeprom: hw_err(hw, "eeprom read at offset %d failed\n", data_offset); return IXGBE_ERR_PHY; } /** * ixgbe_identify_module_generic - Identifies module type * @hw: pointer to hardware structure * * Determines HW type and calls appropriate function. **/ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) { switch (hw->mac.ops.get_media_type(hw)) { case ixgbe_media_type_fiber: return ixgbe_identify_sfp_module_generic(hw); case ixgbe_media_type_fiber_qsfp: return ixgbe_identify_qsfp_module_generic(hw); default: hw->phy.sfp_type = ixgbe_sfp_type_not_present; return IXGBE_ERR_SFP_NOT_PRESENT; } return IXGBE_ERR_SFP_NOT_PRESENT; } /** * ixgbe_identify_sfp_module_generic - Identifies SFP modules * @hw: pointer to hardware structure * * Searches for and identifies the SFP module and assigns appropriate PHY type. **/ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) { struct ixgbe_adapter *adapter = hw->back; s32 status; u32 vendor_oui = 0; enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; u8 identifier = 0; u8 comp_codes_1g = 0; u8 comp_codes_10g = 0; u8 oui_bytes[3] = {0, 0, 0}; u8 cable_tech = 0; u8 cable_spec = 0; u16 enforce_sfp = 0; if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { hw->phy.sfp_type = ixgbe_sfp_type_not_present; return IXGBE_ERR_SFP_NOT_PRESENT; } /* LAN ID is needed for sfp_type determination */ hw->mac.ops.set_lan_id(hw); status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, &identifier); if (status) goto err_read_i2c_eeprom; if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { hw->phy.type = ixgbe_phy_sfp_unsupported; return IXGBE_ERR_SFP_NOT_SUPPORTED; } status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); if (status) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); if (status) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY, &cable_tech); if (status) goto err_read_i2c_eeprom; /* ID Module * ========= * 0 SFP_DA_CU * 1 SFP_SR * 2 SFP_LR * 3 SFP_DA_CORE0 - 82599-specific * 4 SFP_DA_CORE1 - 82599-specific * 5 SFP_SR/LR_CORE0 - 82599-specific * 6 SFP_SR/LR_CORE1 - 82599-specific * 7 SFP_act_lmt_DA_CORE0 - 82599-specific * 8 SFP_act_lmt_DA_CORE1 - 82599-specific * 9 SFP_1g_cu_CORE0 - 82599-specific * 10 SFP_1g_cu_CORE1 - 82599-specific * 11 SFP_1g_sx_CORE0 - 82599-specific * 12 SFP_1g_sx_CORE1 - 82599-specific */ if (hw->mac.type == ixgbe_mac_82598EB) { if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) hw->phy.sfp_type = ixgbe_sfp_type_da_cu; else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) hw->phy.sfp_type = ixgbe_sfp_type_sr; else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) hw->phy.sfp_type = ixgbe_sfp_type_lr; else hw->phy.sfp_type = ixgbe_sfp_type_unknown; } else { if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0; else hw->phy.sfp_type = 
ixgbe_sfp_type_da_cu_core1; } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { hw->phy.ops.read_i2c_eeprom( hw, IXGBE_SFF_CABLE_SPEC_COMP, &cable_spec); if (cable_spec & IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_da_act_lmt_core0; else hw->phy.sfp_type = ixgbe_sfp_type_da_act_lmt_core1; } else { hw->phy.sfp_type = ixgbe_sfp_type_unknown; } } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | IXGBE_SFF_10GBASELR_CAPABLE)) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0; else hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1; } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_1g_cu_core0; else hw->phy.sfp_type = ixgbe_sfp_type_1g_cu_core1; } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_1g_sx_core0; else hw->phy.sfp_type = ixgbe_sfp_type_1g_sx_core1; } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_1g_lx_core0; else hw->phy.sfp_type = ixgbe_sfp_type_1g_lx_core1; } else { hw->phy.sfp_type = ixgbe_sfp_type_unknown; } } if (hw->phy.sfp_type != stored_sfp_type) hw->phy.sfp_setup_needed = true; /* Determine if the SFP+ PHY is dual speed or not. */ hw->phy.multispeed_fiber = false; if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) hw->phy.multispeed_fiber = true; /* Determine PHY vendor */ if (hw->phy.type != ixgbe_phy_nl) { hw->phy.id = identifier; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_VENDOR_OUI_BYTE0, &oui_bytes[0]); if (status != 0) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_VENDOR_OUI_BYTE1, &oui_bytes[1]); if (status != 0) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_VENDOR_OUI_BYTE2, &oui_bytes[2]); if (status != 0) goto err_read_i2c_eeprom; vendor_oui = ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); switch (vendor_oui) { case IXGBE_SFF_VENDOR_OUI_TYCO: if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) hw->phy.type = ixgbe_phy_sfp_passive_tyco; break; case IXGBE_SFF_VENDOR_OUI_FTL: if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) hw->phy.type = ixgbe_phy_sfp_ftl_active; else hw->phy.type = ixgbe_phy_sfp_ftl; break; case IXGBE_SFF_VENDOR_OUI_AVAGO: hw->phy.type = ixgbe_phy_sfp_avago; break; case IXGBE_SFF_VENDOR_OUI_INTEL: hw->phy.type = ixgbe_phy_sfp_intel; break; default: if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) hw->phy.type = ixgbe_phy_sfp_passive_unknown; else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) hw->phy.type = ixgbe_phy_sfp_active_unknown; else hw->phy.type = ixgbe_phy_sfp_unknown; break; } } /* Allow any DA cable vendor */ if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | IXGBE_SFF_DA_ACTIVE_CABLE)) return 0; /* Verify supported 1G SFP modules */ if (comp_codes_10g == 0 && !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { hw->phy.type = ixgbe_phy_sfp_unsupported; return IXGBE_ERR_SFP_NOT_SUPPORTED; } /* Anything else 82598-based 
is supported */ if (hw->mac.type == ixgbe_mac_82598EB) return 0; hw->mac.ops.get_device_caps(hw, &enforce_sfp); if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { /* Make sure we're a supported PHY type */ if (hw->phy.type == ixgbe_phy_sfp_intel) return 0; if (hw->allow_unsupported_sfp) { e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); return 0; } hw_dbg(hw, "SFP+ module not supported\n"); hw->phy.type = ixgbe_phy_sfp_unsupported; return IXGBE_ERR_SFP_NOT_SUPPORTED; } return 0; err_read_i2c_eeprom: hw->phy.sfp_type = ixgbe_sfp_type_not_present; if (hw->phy.type != ixgbe_phy_nl) { hw->phy.id = 0; hw->phy.type = ixgbe_phy_unknown; } return IXGBE_ERR_SFP_NOT_PRESENT; } /** * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules * @hw: pointer to hardware structure * * Searches for and identifies the QSFP module and assigns appropriate PHY type **/ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) { struct ixgbe_adapter *adapter = hw->back; s32 status; u32 vendor_oui = 0; enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; u8 identifier = 0; u8 comp_codes_1g = 0; u8 comp_codes_10g = 0; u8 oui_bytes[3] = {0, 0, 0}; u16 enforce_sfp = 0; u8 connector = 0; u8 cable_length = 0; u8 device_tech = 0; bool active_cable = false; if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) { hw->phy.sfp_type = ixgbe_sfp_type_not_present; return IXGBE_ERR_SFP_NOT_PRESENT; } /* LAN ID is needed for sfp_type determination */ hw->mac.ops.set_lan_id(hw); status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, &identifier); if (status != 0) goto err_read_i2c_eeprom; if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) { hw->phy.type = ixgbe_phy_sfp_unsupported; return IXGBE_ERR_SFP_NOT_SUPPORTED; } hw->phy.id = identifier; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g); if (status != 0) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP, &comp_codes_1g); if (status != 0) goto err_read_i2c_eeprom; if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) { hw->phy.type = ixgbe_phy_qsfp_passive_unknown; if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0; else hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1; } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | IXGBE_SFF_10GBASELR_CAPABLE)) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0; else hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1; } else { if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE) active_cable = true; if (!active_cable) { /* check for active DA cables that pre-date * SFF-8436 v3.6 */ hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_CONNECTOR, &connector); hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_CABLE_LENGTH, &cable_length); hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_DEVICE_TECH, &device_tech); if ((connector == IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) && (cable_length > 0) && ((device_tech >> 4) == 
IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL)) active_cable = true; } if (active_cable) { hw->phy.type = ixgbe_phy_qsfp_active_unknown; if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_da_act_lmt_core0; else hw->phy.sfp_type = ixgbe_sfp_type_da_act_lmt_core1; } else { /* unsupported module type */ hw->phy.type = ixgbe_phy_sfp_unsupported; return IXGBE_ERR_SFP_NOT_SUPPORTED; } } if (hw->phy.sfp_type != stored_sfp_type) hw->phy.sfp_setup_needed = true; /* Determine if the QSFP+ PHY is dual speed or not. */ hw->phy.multispeed_fiber = false; if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) hw->phy.multispeed_fiber = true; /* Determine PHY vendor for optical modules */ if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | IXGBE_SFF_10GBASELR_CAPABLE)) { status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0, &oui_bytes[0]); if (status != 0) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1, &oui_bytes[1]); if (status != 0) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2, &oui_bytes[2]); if (status != 0) goto err_read_i2c_eeprom; vendor_oui = ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL) hw->phy.type = ixgbe_phy_qsfp_intel; else hw->phy.type = ixgbe_phy_qsfp_unknown; hw->mac.ops.get_device_caps(hw, &enforce_sfp); if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { /* Make sure we're a supported PHY type */ if (hw->phy.type == ixgbe_phy_qsfp_intel) return 0; if (hw->allow_unsupported_sfp) { e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); return 0; } hw_dbg(hw, "QSFP module not supported\n"); hw->phy.type = ixgbe_phy_sfp_unsupported; return IXGBE_ERR_SFP_NOT_SUPPORTED; } return 0; } return 0; err_read_i2c_eeprom: hw->phy.sfp_type = ixgbe_sfp_type_not_present; hw->phy.id = 0; hw->phy.type = ixgbe_phy_unknown; return IXGBE_ERR_SFP_NOT_PRESENT; } /** * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence * @hw: pointer to hardware structure * @list_offset: offset to the SFP ID list * @data_offset: offset to the SFP data block * * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if * so it returns the offsets to the phy init sequence block. 
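* The ID list itself is a sequence of (sfp_id, data_offset) word pairs terminated by an sfp_id of IXGBE_PHY_INIT_END_NL; a data_offset of 0 or 0xFFFF marks an unsupported module, otherwise it points at the init block consumed by ixgbe_reset_phy_nl.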
**/ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 *list_offset, u16 *data_offset) { u16 sfp_id; u16 sfp_type = hw->phy.sfp_type; if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) return IXGBE_ERR_SFP_NOT_SUPPORTED; if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) return IXGBE_ERR_SFP_NOT_PRESENT; if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) && (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)) return IXGBE_ERR_SFP_NOT_SUPPORTED; /* * Limiting active cables and 1G Phys must be initialized as * SR modules */ if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || sfp_type == ixgbe_sfp_type_1g_lx_core0 || sfp_type == ixgbe_sfp_type_1g_cu_core0 || sfp_type == ixgbe_sfp_type_1g_sx_core0) sfp_type = ixgbe_sfp_type_srlr_core0; else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || sfp_type == ixgbe_sfp_type_1g_lx_core1 || sfp_type == ixgbe_sfp_type_1g_cu_core1 || sfp_type == ixgbe_sfp_type_1g_sx_core1) sfp_type = ixgbe_sfp_type_srlr_core1; /* Read offset to PHY init contents */ if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) { hw_err(hw, "eeprom read at %d failed\n", IXGBE_PHY_INIT_OFFSET_NL); return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; } if ((!*list_offset) || (*list_offset == 0xFFFF)) return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; /* Shift offset to first ID word */ (*list_offset)++; /* * Find the matching SFP ID in the EEPROM * and program the init sequence */ if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) goto err_phy; while (sfp_id != IXGBE_PHY_INIT_END_NL) { if (sfp_id == sfp_type) { (*list_offset)++; if (hw->eeprom.ops.read(hw, *list_offset, data_offset)) goto err_phy; if ((!*data_offset) || (*data_offset == 0xFFFF)) { hw_dbg(hw, "SFP+ module not supported\n"); return IXGBE_ERR_SFP_NOT_SUPPORTED; } else { break; } } else { (*list_offset) += 2; if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) goto err_phy; } } if (sfp_id == IXGBE_PHY_INIT_END_NL) { hw_dbg(hw, "No matching SFP+ module found\n"); return IXGBE_ERR_SFP_NOT_SUPPORTED; } return 0; err_phy: hw_err(hw, "eeprom read at offset %d failed\n", *list_offset); return IXGBE_ERR_PHY; } /** * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface * @hw: pointer to hardware structure * @byte_offset: EEPROM byte offset to read * @eeprom_data: value read * * Performs byte read operation to SFP module's EEPROM over I2C interface. **/ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) { return hw->phy.ops.read_i2c_byte(hw, byte_offset, IXGBE_I2C_EEPROM_DEV_ADDR, eeprom_data); } /** * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface * @hw: pointer to hardware structure * @byte_offset: byte offset at address 0xA2 * @sff8472_data: value read * * Performs byte read operation to SFP module's SFF-8472 data over I2C **/ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *sff8472_data) { return hw->phy.ops.read_i2c_byte(hw, byte_offset, IXGBE_I2C_EEPROM_DEV_ADDR2, sff8472_data); } /** * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface * @hw: pointer to hardware structure * @byte_offset: EEPROM byte offset to write * @eeprom_data: value to write * * Performs byte write operation to SFP module's EEPROM over I2C interface. 
**/ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data) { return hw->phy.ops.write_i2c_byte(hw, byte_offset, IXGBE_I2C_EEPROM_DEV_ADDR, eeprom_data); } /** * ixgbe_is_sfp_probe - Returns true if SFP is being detected * @hw: pointer to hardware structure * @offset: eeprom offset to be read * @addr: I2C address to be read */ static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) { if (addr == IXGBE_I2C_EEPROM_DEV_ADDR && offset == IXGBE_SFF_IDENTIFIER && hw->phy.sfp_type == ixgbe_sfp_type_not_present) return true; return false; } /** * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read * @dev_addr: device address * @data: value read * @lock: true if to take and release semaphore * * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. */ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data, bool lock) { s32 status; u32 max_retry = 10; u32 retry = 0; u32 swfw_mask = hw->phy.phy_semaphore_mask; bool nack = true; if (hw->mac.type >= ixgbe_mac_X550) max_retry = 3; if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) max_retry = IXGBE_SFP_DETECT_RETRIES; *data = 0; do { if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; ixgbe_i2c_start(hw); /* Device Address and write indication */ status = ixgbe_clock_out_i2c_byte(hw, dev_addr); if (status != 0) goto fail; status = ixgbe_get_i2c_ack(hw); if (status != 0) goto fail; status = ixgbe_clock_out_i2c_byte(hw, byte_offset); if (status != 0) goto fail; status = ixgbe_get_i2c_ack(hw); if (status != 0) goto fail; ixgbe_i2c_start(hw); /* Device Address and read indication */ status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1)); if (status != 0) goto fail; status = ixgbe_get_i2c_ack(hw); if (status != 0) goto fail; status = ixgbe_clock_in_i2c_byte(hw, data); if (status != 0) goto fail; status = ixgbe_clock_out_i2c_bit(hw, nack); if (status != 0) goto fail; ixgbe_i2c_stop(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); return 0; fail: ixgbe_i2c_bus_clear(hw); if (lock) { hw->mac.ops.release_swfw_sync(hw, swfw_mask); msleep(100); } retry++; if (retry < max_retry) hw_dbg(hw, "I2C byte read error - Retrying.\n"); else hw_dbg(hw, "I2C byte read error.\n"); } while (retry < max_retry); return status; } /** * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read * @dev_addr: device address * @data: value read * * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. */ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, data, true); } /** * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read * @dev_addr: device address * @data: value read * * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. 
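* Unlike ixgbe_read_i2c_byte_generic, this variant does not take the SWFW semaphore, so the caller is expected to already hold it.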
*/ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, data, false); } /** * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write * @dev_addr: device address * @data: value to write * @lock: true if to take and release semaphore * * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. */ static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data, bool lock) { s32 status; u32 max_retry = 1; u32 retry = 0; u32 swfw_mask = hw->phy.phy_semaphore_mask; if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; do { ixgbe_i2c_start(hw); status = ixgbe_clock_out_i2c_byte(hw, dev_addr); if (status != 0) goto fail; status = ixgbe_get_i2c_ack(hw); if (status != 0) goto fail; status = ixgbe_clock_out_i2c_byte(hw, byte_offset); if (status != 0) goto fail; status = ixgbe_get_i2c_ack(hw); if (status != 0) goto fail; status = ixgbe_clock_out_i2c_byte(hw, data); if (status != 0) goto fail; status = ixgbe_get_i2c_ack(hw); if (status != 0) goto fail; ixgbe_i2c_stop(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); return 0; fail: ixgbe_i2c_bus_clear(hw); retry++; if (retry < max_retry) hw_dbg(hw, "I2C byte write error - Retrying.\n"); else hw_dbg(hw, "I2C byte write error.\n"); } while (retry < max_retry); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); return status; } /** * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write * @dev_addr: device address * @data: value to write * * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. */ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, data, true); } /** * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write * @dev_addr: device address * @data: value to write * * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. */ s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, data, false); } /** * ixgbe_i2c_start - Sets I2C start condition * @hw: pointer to hardware structure * * Sets I2C start condition (High -> Low on SDA while SCL is High) * Set bit-bang mode on X550 hardware. 
**/ static void ixgbe_i2c_start(struct ixgbe_hw *hw) { u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); i2cctl |= IXGBE_I2C_BB_EN(hw); /* Start condition must begin with data and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 1); ixgbe_raise_i2c_clk(hw, &i2cctl); /* Setup time for start condition (4.7us) */ udelay(IXGBE_I2C_T_SU_STA); ixgbe_set_i2c_data(hw, &i2cctl, 0); /* Hold time for start condition (4us) */ udelay(IXGBE_I2C_T_HD_STA); ixgbe_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us */ udelay(IXGBE_I2C_T_LOW); } /** * ixgbe_i2c_stop - Sets I2C stop condition * @hw: pointer to hardware structure * * Sets I2C stop condition (Low -> High on SDA while SCL is High) * Disables bit-bang mode and negates data output enable on X550 * hardware. **/ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) { u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw); u32 bb_en_bit = IXGBE_I2C_BB_EN(hw); /* Stop condition must begin with data low and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 0); ixgbe_raise_i2c_clk(hw, &i2cctl); /* Setup time for stop condition (4us) */ udelay(IXGBE_I2C_T_SU_STO); ixgbe_set_i2c_data(hw, &i2cctl, 1); /* bus free time between stop and start (4.7us)*/ udelay(IXGBE_I2C_T_BUF); if (bb_en_bit || data_oe_bit || clk_oe_bit) { i2cctl &= ~bb_en_bit; i2cctl |= data_oe_bit | clk_oe_bit; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); } } /** * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C * @hw: pointer to hardware structure * @data: data byte to clock in * * Clocks in one byte data via I2C data/clock **/ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) { s32 i; bool bit = false; *data = 0; for (i = 7; i >= 0; i--) { ixgbe_clock_in_i2c_bit(hw, &bit); *data |= bit << i; } return 0; } /** * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C * @hw: pointer to hardware structure * @data: data byte clocked out * * Clocks out one byte data via I2C data/clock **/ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) { s32 status; s32 i; u32 i2cctl; bool bit = false; for (i = 7; i >= 0; i--) { bit = (data >> i) & 0x1; status = ixgbe_clock_out_i2c_bit(hw, bit); if (status != 0) break; } /* Release SDA line (set high) */ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); i2cctl |= IXGBE_I2C_DATA_OUT(hw); i2cctl |= IXGBE_I2C_DATA_OE_N_EN(hw); IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); return status; } /** * ixgbe_get_i2c_ack - Polls for I2C ACK * @hw: pointer to hardware structure * * Clocks in/out one bit via I2C data/clock **/ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) { u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); s32 status = 0; u32 i = 0; u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); u32 timeout = 10; bool ack = true; if (data_oe_bit) { i2cctl |= IXGBE_I2C_DATA_OUT(hw); i2cctl |= data_oe_bit; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); } ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ udelay(IXGBE_I2C_T_HIGH); /* Poll for ACK. 
Note that ACK in I2C spec is * transition from 1 to 0 */ for (i = 0; i < timeout; i++) { i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); ack = ixgbe_get_i2c_data(hw, &i2cctl); udelay(1); if (ack == 0) break; } if (ack == 1) { hw_dbg(hw, "I2C ack was not received.\n"); status = IXGBE_ERR_I2C; } ixgbe_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us */ udelay(IXGBE_I2C_T_LOW); return status; } /** * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock * @hw: pointer to hardware structure * @data: read data value * * Clocks in one bit via I2C data/clock **/ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) { u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); if (data_oe_bit) { i2cctl |= IXGBE_I2C_DATA_OUT(hw); i2cctl |= data_oe_bit; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); } ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ udelay(IXGBE_I2C_T_HIGH); i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); *data = ixgbe_get_i2c_data(hw, &i2cctl); ixgbe_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us */ udelay(IXGBE_I2C_T_LOW); return 0; } /** * ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock * @hw: pointer to hardware structure * @data: data value to write * * Clocks out one bit via I2C data/clock **/ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) { s32 status; u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); status = ixgbe_set_i2c_data(hw, &i2cctl, data); if (status == 0) { ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ udelay(IXGBE_I2C_T_HIGH); ixgbe_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us. * This also takes care of the data hold time. */ udelay(IXGBE_I2C_T_LOW); } else { hw_dbg(hw, "I2C data was not set to %X\n", data); return IXGBE_ERR_I2C; } return 0; } /** * ixgbe_raise_i2c_clk - Raises the I2C SCL clock * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * * Raises the I2C clock line '0'->'1' * Negates the I2C clock output enable on X550 hardware. **/ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw); u32 i = 0; u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT; u32 i2cctl_r = 0; if (clk_oe_bit) { *i2cctl |= clk_oe_bit; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); } for (i = 0; i < timeout; i++) { *i2cctl |= IXGBE_I2C_CLK_OUT(hw); IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL rise time (1000ns) */ udelay(IXGBE_I2C_T_RISE); i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); if (i2cctl_r & IXGBE_I2C_CLK_IN(hw)) break; } } /** * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * * Lowers the I2C clock line '1'->'0' * Asserts the I2C clock output enable on X550 hardware. **/ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { *i2cctl &= ~IXGBE_I2C_CLK_OUT(hw); *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN(hw); IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL fall time (300ns) */ udelay(IXGBE_I2C_T_FALL); } /** * ixgbe_set_i2c_data - Sets the I2C data bit * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * @data: I2C data value (0 or 1) to set * * Sets the I2C data bit * Asserts the I2C data output enable on X550 hardware. 
**/ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) { u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); if (data) *i2cctl |= IXGBE_I2C_DATA_OUT(hw); else *i2cctl &= ~IXGBE_I2C_DATA_OUT(hw); *i2cctl &= ~data_oe_bit; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); if (!data) /* Can't verify data in this case */ return 0; if (data_oe_bit) { *i2cctl |= data_oe_bit; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); } /* Verify data was set correctly */ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); if (data != ixgbe_get_i2c_data(hw, i2cctl)) { hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); return IXGBE_ERR_I2C; } return 0; } /** * ixgbe_get_i2c_data - Reads the I2C SDA data bit * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * * Returns the I2C data bit value * Negates the I2C data output enable on X550 hardware. **/ static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) { u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); if (data_oe_bit) { *i2cctl |= data_oe_bit; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); udelay(IXGBE_I2C_T_FALL); } if (*i2cctl & IXGBE_I2C_DATA_IN(hw)) return true; return false; } /** * ixgbe_i2c_bus_clear - Clears the I2C bus * @hw: pointer to hardware structure * * Clears the I2C bus by sending nine clock pulses. * Used when data line is stuck low. **/ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) { u32 i2cctl; u32 i; ixgbe_i2c_start(hw); i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); ixgbe_set_i2c_data(hw, &i2cctl, 1); for (i = 0; i < 9; i++) { ixgbe_raise_i2c_clk(hw, &i2cctl); /* Min high period of clock is 4us */ udelay(IXGBE_I2C_T_HIGH); ixgbe_lower_i2c_clk(hw, &i2cctl); /* Min low period of clock is 4.7us*/ udelay(IXGBE_I2C_T_LOW); } ixgbe_i2c_start(hw); /* Put the i2c bus back to default state */ ixgbe_i2c_stop(hw); } /** * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. * @hw: pointer to hardware structure * * Checks if the LASI temp alarm status was triggered due to overtemp **/ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) { u16 phy_data = 0; if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) return 0; /* Check that the LASI temp alarm status was triggered */ hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, MDIO_MMD_PMAPMD, &phy_data); if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) return 0; return IXGBE_ERR_OVERTEMP; } /** ixgbe_set_copper_phy_power - Control power for copper phy * @hw: pointer to hardware structure * @on: true for on, false for off **/ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) { u32 status; u16 reg; /* Bail if we don't have copper phy */ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) return 0; if (!on && ixgbe_mng_present(hw)) return 0; status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, &reg); if (status) return status; if (on) { reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; } else { if (ixgbe_check_reset_blocked(hw)) return 0; reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; } status = hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, reg); return status; }
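/*
 * Illustrative sketch only -- not part of ixgbe_phy.c. It shows how the
 * byte-wide SFP EEPROM helpers above compose into a higher-level read,
 * mirroring the first steps of ixgbe_identify_sfp_module_generic. The
 * function name ixgbe_sketch_read_sfp_ids is hypothetical; the offsets and
 * ixgbe_read_i2c_eeprom_generic() are the same ones used by the identify
 * path above.
 */
static s32 ixgbe_sketch_read_sfp_ids(struct ixgbe_hw *hw, u8 *id,
				     u8 *comp_1g, u8 *comp_10g)
{
	s32 status;

	/* SFF identifier byte distinguishes SFP/SFP+ from other module types */
	status = ixgbe_read_i2c_eeprom_generic(hw, IXGBE_SFF_IDENTIFIER, id);
	if (status)
		return status;

	/* 1G and 10G Ethernet compliance codes drive sfp_type classification */
	status = ixgbe_read_i2c_eeprom_generic(hw, IXGBE_SFF_1GBE_COMP_CODES,
					       comp_1g);
	if (status)
		return status;

	return ixgbe_read_i2c_eeprom_generic(hw, IXGBE_SFF_10GBE_COMP_CODES,
					     comp_10g);
}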
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "ixgbe.h" #include "ixgbe_type.h" #include "ixgbe_dcb.h" #include "ixgbe_dcb_82598.h" #include "ixgbe_dcb_82599.h" /** * ixgbe_ieee_credits - This calculates the ieee traffic class * credits from the configured bandwidth percentages. Credits * are the smallest unit programmable into the underlying * hardware. The IEEE 802.1Qaz specification do not use bandwidth * groups so this is much simplified from the CEE case. * @bw: bandwidth index by traffic class * @refill: refill credits index by traffic class * @max: max credits by traffic class * @max_frame: maximum frame size */ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame) { int min_percent = 100; int min_credit, multiplier; int i; min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / DCB_CREDIT_QUANTUM; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { if (bw[i] < min_percent && bw[i]) min_percent = bw[i]; } multiplier = (min_credit / min_percent) + 1; /* Find out the hw credits for each TC */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL); if (val < min_credit) val = min_credit; refill[i] = val; max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit; } return 0; } /** * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits * @hw: pointer to hardware structure * @dcb_config: Struct containing DCB settings * @max_frame: Maximum frame size * @direction: Configuring either Tx or Rx * * This function calculates the credits allocated to each traffic class. * It should be called only after the rules are checked by * ixgbe_dcb_check_config(). */ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config, int max_frame, u8 direction) { struct tc_bw_alloc *p; int min_credit; int min_multiplier; int min_percent = 100; /* Initialization values default for Tx settings */ u32 credit_refill = 0; u32 credit_max = 0; u16 link_percentage = 0; u8 bw_percent = 0; u8 i; if (!dcb_config) return DCB_ERR_CONFIG; min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / DCB_CREDIT_QUANTUM; /* Find smallest link percentage */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { p = &dcb_config->tc_config[i].path[direction]; bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; link_percentage = p->bwg_percent; link_percentage = (link_percentage * bw_percent) / 100; if (link_percentage && link_percentage < min_percent) min_percent = link_percentage; } /* * The ratio between traffic classes will control the bandwidth * percentages seen on the wire. To calculate this ratio we use * a multiplier. It is required that the refill credits must be * larger than the max frame size so here we find the smallest * multiplier that will allow all bandwidth percentages to be * greater than the max frame size. 
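* For example, assuming DCB_CREDIT_QUANTUM is 64 bytes, a 9216-byte max frame gives min_credit = (4608 + 63) / 64 = 72; if the smallest link percentage is 10, the multiplier below becomes 72 / 10 + 1 = 8, so that TC is refilled with at least 10 * 8 = 80 credits, comfortably above min_credit.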
*/ min_multiplier = (min_credit / min_percent) + 1; /* Find out the link percentage for each TC first */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { p = &dcb_config->tc_config[i].path[direction]; bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; link_percentage = p->bwg_percent; /* Must be careful of integer division for very small nums */ link_percentage = (link_percentage * bw_percent) / 100; if (p->bwg_percent > 0 && link_percentage == 0) link_percentage = 1; /* Save link_percentage for reference */ p->link_percent = (u8)link_percentage; /* Calculate credit refill ratio using multiplier */ credit_refill = min(link_percentage * min_multiplier, MAX_CREDIT_REFILL); /* Refill at least minimum credit */ if (credit_refill < min_credit) credit_refill = min_credit; p->data_credits_refill = (u16)credit_refill; /* Calculate maximum credit for the TC */ credit_max = (link_percentage * MAX_CREDIT) / 100; /* * Adjustment based on rule checking, if the percentage * of a TC is too small, the maximum credit may not be * enough to send out a jumbo frame in data plane arbitration. */ if (credit_max < min_credit) credit_max = min_credit; if (direction == DCB_TX_CONFIG) { /* * Adjustment based on rule checking, if the * percentage of a TC is too small, the maximum * credit may not be enough to send out a TSO * packet in descriptor plane arbitration. */ if ((hw->mac.type == ixgbe_mac_82598EB) && credit_max && (credit_max < MINIMUM_CREDIT_FOR_TSO)) credit_max = MINIMUM_CREDIT_FOR_TSO; dcb_config->tc_config[i].desc_credits_max = (u16)credit_max; } p->data_credits_max = (u16)credit_max; } return 0; } void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) { struct tc_configuration *tc_config = &cfg->tc_config[0]; int tc; for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) { if (tc_config[tc].dcb_pfc != pfc_disabled) *pfc_en |= BIT(tc); } } void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction, u16 *refill) { struct tc_configuration *tc_config = &cfg->tc_config[0]; int tc; for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) refill[tc] = tc_config[tc].path[direction].data_credits_refill; } void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max) { struct tc_configuration *tc_config = &cfg->tc_config[0]; int tc; for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) max[tc] = tc_config[tc].desc_credits_max; } void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction, u8 *bwgid) { struct tc_configuration *tc_config = &cfg->tc_config[0]; int tc; for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) bwgid[tc] = tc_config[tc].path[direction].bwg_id; } void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, u8 *ptype) { struct tc_configuration *tc_config = &cfg->tc_config[0]; int tc; for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) ptype[tc] = tc_config[tc].path[direction].prio_type; } u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) { struct tc_configuration *tc_config = &cfg->tc_config[0]; u8 prio_mask = BIT(up); u8 tc = cfg->num_tcs.pg_tcs; /* If tc is 0 then DCB is likely not enabled or supported */ if (!tc) return 0; /* * Test from maximum TC to 1 and report the first match we find. 
If * we find no match we can assume that the TC is 0 since the TC must * be set for all user priorities */ for (tc--; tc; tc--) { if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) break; } return tc; } void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map) { u8 up; for (up = 0; up < MAX_USER_PRIORITY; up++) map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up); } /** * ixgbe_dcb_hw_config - Config and enable DCB * @hw: pointer to hardware structure * @dcb_config: pointer to ixgbe_dcb_config structure * * Configure dcb settings and enable dcb mode. */ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { u8 pfc_en; u8 ptype[MAX_TRAFFIC_CLASS]; u8 bwgid[MAX_TRAFFIC_CLASS]; u8 prio_tc[MAX_TRAFFIC_CLASS]; u16 refill[MAX_TRAFFIC_CLASS]; u16 max[MAX_TRAFFIC_CLASS]; /* Unpack CEE standard containers */ ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en); ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill); ixgbe_dcb_unpack_max(dcb_config, max); ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid); ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype); ixgbe_dcb_unpack_map(dcb_config, DCB_TX_CONFIG, prio_tc); switch (hw->mac.type) { case ixgbe_mac_82598EB: return ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max, bwgid, ptype); case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, bwgid, ptype, prio_tc); default: break; } return 0; } /* Helper routines to abstract HW specifics from DCB netlink ops */ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) { switch (hw->mac.type) { case ixgbe_mac_82598EB: return ixgbe_dcb_config_pfc_82598(hw, pfc_en); case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); default: break; } return -EINVAL; } s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame) { __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; int i; /* naively give each TC a bwg to map onto CEE hardware */ __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; /* Map TSA onto CEE prio type */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: prio_type[i] = 2; break; case IEEE_8021QAZ_TSA_ETS: prio_type[i] = 0; break; default: /* Hardware only supports priority strict or * ETS transmission selection algorithms if * we receive some other value from dcbnl * throw an error */ return -EINVAL; } } ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame); return ixgbe_dcb_hw_ets_config(hw, refill, max, bwg_id, prio_type, ets->prio_tc); } s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) { switch (hw->mac.type) { case ixgbe_mac_82598EB: ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type); ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, prio_type); ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, prio_type); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type, prio_tc); ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, prio_type); ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, 
prio_type, prio_tc); break; default: break; } return 0; } static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map) { u32 reg, i; reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); for (i = 0; i < MAX_USER_PRIORITY; i++) map[i] = IXGBE_RTRUP2TC_UP_MASK & (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); } void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map) { switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: ixgbe_dcb_read_rtrup2tc_82599(hw, map); break; default: break; } }
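/*
 * Illustrative sketch only -- not part of ixgbe_dcb.c. It shows how a caller
 * could hand an IEEE ETS configuration to ixgbe_dcb_hw_ets(), assuming the
 * dcbnl ieee_ets definitions are in scope and hw points at an initialised
 * adapter. The helper name, the 50/50 two-class split and the 1518-byte max
 * frame are made up for the example.
 */
static s32 ixgbe_sketch_apply_ets(struct ixgbe_hw *hw)
{
	struct ieee_ets ets = {};
	int i;

	/* ixgbe_dcb_hw_ets() rejects any TC without a strict or ETS TSA */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;

	/* Split transmit bandwidth between TC0 and TC1 */
	ets.tc_tx_bw[0] = 50;
	ets.tc_tx_bw[1] = 50;

	/* Map user priorities 4-7 onto TC1; 0-3 stay on TC0 */
	for (i = 4; i < IEEE_8021QAZ_MAX_TCS; i++)
		ets.prio_tc[i] = 1;

	return ixgbe_dcb_hw_ets(hw, &ets, 1518);
}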
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "ixgbe.h"
#include "ixgbe_txrx_common.h"

struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
				     struct ixgbe_ring *ring)
{
	bool xdp_on = READ_ONCE(adapter->xdp_prog);
	int qid = ring->ring_idx;

	if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(adapter->netdev, qid);
}

static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
				 struct xsk_buff_pool *pool,
				 u16 qid)
{
	struct net_device *netdev = adapter->netdev;
	bool if_running;
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
	if (err)
		return err;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	set_bit(qid, adapter->af_xdp_zc_qps);

	if (if_running) {
		ixgbe_txrx_ring_enable(adapter, qid);

		/* Kick start the NAPI context so that receiving will start */
		err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
		if (err) {
			clear_bit(qid, adapter->af_xdp_zc_qps);
			xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}

static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
{
	struct xsk_buff_pool *pool;
	bool if_running;

	pool = xsk_get_pool_from_qid(adapter->netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	clear_bit(qid, adapter->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);

	if (if_running)
		ixgbe_txrx_ring_enable(adapter, qid);

	return 0;
}

int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
			 struct xsk_buff_pool *pool,
			 u16 qid)
{
	return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
		      ixgbe_xsk_pool_disable(adapter, qid);
}

static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *rx_ring,
			    struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct ixgbe_ring *ring;
	struct xdp_frame *xdpf;
	u32 act;

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return IXGBE_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = IXGBE_XDP_EXIT;
		else
			result = IXGBE_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto out_failure;
		ring = ixgbe_determine_xdp_ring(adapter);
		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
			spin_lock(&ring->tx_lock);
		result = ixgbe_xmit_xdp_ring(ring, xdpf);
		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
			spin_unlock(&ring->tx_lock);
		if (result == IXGBE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = IXGBE_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
	}
	return result;
}

bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	dma_addr_t dma;
	bool ok = true;

	/* nothing to do */
	if (!count)
		return true;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!bi->xdp) {
			ok = false;
			break;
		}

		dma = xsk_buff_xdp_get_dma(bi->xdp);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		count--;
	} while (count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}

	return ok;
}

static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
					      const struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	net_prefetch(xdp->data_meta);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	return skb;
}

static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}

int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_zc(rx_ring,
							     cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			ixgbe_inc_ntc(rx_ring);
			next_bi =
			       &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->discard = true;
			continue;
		}

		if (unlikely(bi->discard)) {
			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			bi->discard = false;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		bi->xdp->data_end = bi->xdp->data + size;
		xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);

		if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
			xdp_xmit |= xdp_res;
		} else if (xdp_res == IXGBE_XDP_EXIT) {
			failure = true;
			break;
		} else if (xdp_res == IXGBE_XDP_CONSUMED) {
			xsk_buff_free(bi->xdp);
		} else if (xdp_res == IXGBE_XDP_PASS) {
			goto construct_skb;
		}

		bi->xdp = NULL;
		total_rx_packets++;
		total_rx_bytes += size;

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);
		continue;

construct_skb:
		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);

		ixgbe_xdp_ring_update_tail_locked(ring);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	struct ixgbe_rx_buffer *bi;
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		bi = &rx_ring->rx_buffer_info[i];

		if (!bi->xdp)
			continue;

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;
	}
}

static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
	struct xsk_buff_pool *pool = xdp_ring->xsk_pool;
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_bi;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;
	u32 cmd_type;

	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
			work_done = false;
			break;
		}

		if (!netif_carrier_ok(xdp_ring->netdev))
			break;

		if (!xsk_tx_peek_desc(pool, &desc))
			break;

		dma = xsk_buff_raw_get_dma(pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);

		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;
		tx_bi->xdpf = NULL;
		tx_bi->gso_segs = 1;

		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* put descriptor type bits */
		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
			   IXGBE_ADVTXD_DCMD_DEXT |
			   IXGBE_ADVTXD_DCMD_IFCS;
		cmd_type |= desc.len | IXGBE_TXD_CMD;
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status =
			cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		ixgbe_xdp_ring_update_tail(xdp_ring);
		xsk_tx_release(pool);
	}

	return !!budget && work_done;
}

static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
			    struct ixgbe_ring *tx_ring, int napi_budget)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	unsigned int total_packets = 0, total_bytes = 0;
	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	tx_bi = &tx_ring->tx_buffer_info[ntc];
	tx_desc = IXGBE_TX_DESC(tx_ring, ntc);

	while (ntc != ntu) {
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		total_bytes += tx_bi->bytecount;
		total_packets += tx_bi->gso_segs;

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		tx_bi++;
		tx_desc++;
		ntc++;
		if (unlikely(ntc == tx_ring->count)) {
			ntc = 0;
			tx_bi = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);
	}

	tx_ring->next_to_clean = ntc;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (xsk_frames)
		xsk_tx_completed(pool, xsk_frames);

	if (xsk_uses_need_wakeup(pool))
		xsk_set_tx_need_wakeup(pool);

	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}

int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return -ENETDOWN;

	if (!READ_ONCE(adapter->xdp_prog))
		return -EINVAL;

	if (qid >= adapter->num_xdp_queues)
		return -EINVAL;

	ring = adapter->xdp_ring[qid];

	if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
		return -ENETDOWN;

	if (!ring->xsk_pool)
		return -EINVAL;

	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
		u64 eics = BIT_ULL(ring->q_vector->v_idx);

		ixgbe_irq_rearm_queues(adapter, eics);
	}

	return 0;
}

void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_buffer_info[ntc];

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc == tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(pool, xsk_frames);
}
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
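The zero-copy paths in ixgbe_xsk.c above only come into play after user space binds an AF_XDP socket to a specific queue, which is what eventually reaches ixgbe_xsk_pool_setup(). For orientation, the following is a minimal user-space sketch of that binding using the libxdp/libbpf xsk helpers. It is illustrative only and not part of the kernel sources reproduced in this dump; the interface name ("eth0"), queue id (0), frame count, and the default (NULL) UMEM/socket configs are arbitrary assumptions.

/* Illustrative AF_XDP user-space sketch (assumes libxdp's xsk helper API).
 * Binding to (ifname, queue_id) is what switches that queue into the
 * driver's zero-copy path shown above.
 */
#include <stdlib.h>
#include <sys/mman.h>
#include <xdp/xsk.h>	/* xsk_umem__create(), xsk_socket__create() */

#define NUM_FRAMES	4096
#define FRAME_SIZE	XSK_UMEM__DEFAULT_FRAME_SIZE

int main(void)
{
	struct xsk_ring_prod fill;
	struct xsk_ring_cons comp;
	struct xsk_ring_cons rx;
	struct xsk_ring_prod tx;
	struct xsk_socket *xsk;
	struct xsk_umem *umem;
	void *bufs;

	/* Packet buffer area shared with the kernel (the UMEM). */
	bufs = mmap(NULL, NUM_FRAMES * FRAME_SIZE, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (bufs == MAP_FAILED)
		return 1;

	/* NULL configs fall back to the library defaults. */
	if (xsk_umem__create(&umem, bufs, NUM_FRAMES * FRAME_SIZE,
			     &fill, &comp, NULL))
		return 1;

	if (xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL))
		return 1;

	/* ... populate the fill ring, then service the rx/tx rings ... */

	xsk_socket__delete(xsk);
	xsk_umem__delete(umem);
	munmap(bufs, NUM_FRAMES * FRAME_SIZE);
	return 0;
}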
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> #include <linux/sched.h> #include "ixgbe.h" #include "ixgbe_phy.h" #include "ixgbe_mbx.h" #define IXGBE_82599_MAX_TX_QUEUES 128 #define IXGBE_82599_MAX_RX_QUEUES 128 #define IXGBE_82599_RAR_ENTRIES 128 #define IXGBE_82599_MC_TBL_SIZE 128 #define IXGBE_82599_VFT_TBL_SIZE 128 #define IXGBE_82599_RX_PB_SIZE 512 static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); static void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed); static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, bool autoneg_wait_to_complete); static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data); static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data); static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); bool ixgbe_mng_enabled(struct ixgbe_hw *hw) { u32 fwsm, manc, factps; fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) return false; manc = IXGBE_READ_REG(hw, IXGBE_MANC); if (!(manc & IXGBE_MANC_RCV_TCO_EN)) return false; factps = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); if (factps & IXGBE_FACTPS_MNGCG) return false; return true; } static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; /* enable the laser control functions for SFP+ fiber * and MNG not enabled */ if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && !ixgbe_mng_enabled(hw)) { mac->ops.disable_tx_laser = &ixgbe_disable_tx_laser_multispeed_fiber; mac->ops.enable_tx_laser = &ixgbe_enable_tx_laser_multispeed_fiber; mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; } else { mac->ops.disable_tx_laser = NULL; mac->ops.enable_tx_laser = NULL; mac->ops.flap_tx_laser = NULL; } if (hw->phy.multispeed_fiber) { /* Set up dual speed SFP+ support */ mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599; mac->ops.set_rate_select_speed = ixgbe_set_hard_rate_select_speed; } else { if ((mac->ops.get_media_type(hw) == ixgbe_media_type_backplane) && (hw->phy.smart_speed == ixgbe_smart_speed_auto || hw->phy.smart_speed == ixgbe_smart_speed_on) && !ixgbe_verify_lesm_fw_enabled_82599(hw)) mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; else mac->ops.setup_link = &ixgbe_setup_mac_link_82599; } } static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) { s32 ret_val; u16 list_offset, data_offset, data_value; if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { ixgbe_init_mac_link_ops_82599(hw); hw->phy.ops.reset = NULL; ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, &data_offset); if (ret_val) return ret_val; /* PHY config will 
finish before releasing the semaphore */ ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); if (ret_val) return IXGBE_ERR_SWFW_SYNC; if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) goto setup_sfp_err; while (data_value != 0xffff) { IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); IXGBE_WRITE_FLUSH(hw); if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) goto setup_sfp_err; } /* Release the semaphore */ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); /* * Delay obtaining semaphore again to allow FW access, * semaphore_delay is in ms usleep_range needs us. */ usleep_range(hw->eeprom.semaphore_delay * 1000, hw->eeprom.semaphore_delay * 2000); /* Restart DSP and set SFI mode */ ret_val = hw->mac.ops.prot_autoc_write(hw, hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL, false); if (ret_val) { hw_dbg(hw, " sfp module setup not complete\n"); return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; } } return 0; setup_sfp_err: /* Release the semaphore */ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); /* Delay obtaining semaphore again to allow FW access, * semaphore_delay is in ms usleep_range needs us. */ usleep_range(hw->eeprom.semaphore_delay * 1000, hw->eeprom.semaphore_delay * 2000); hw_err(hw, "eeprom read at offset %d failed\n", data_offset); return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; } /** * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read * @hw: pointer to hardware structure * @locked: Return the if we locked for this read. * @reg_val: Value we read from AUTOC * * For this part (82599) we need to wrap read-modify-writes with a possible * FW/SW lock. It is assumed this lock will be freed with the next * prot_autoc_write_82599(). Note, that locked can only be true in cases * where this function doesn't return an error. **/ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) { s32 ret_val; *locked = false; /* If LESM is on then we need to hold the SW/FW semaphore. */ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); if (ret_val) return IXGBE_ERR_SWFW_SYNC; *locked = true; } *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); return 0; } /** * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write * @hw: pointer to hardware structure * @autoc: value to write to AUTOC * @locked: bool to indicate whether the SW/FW lock was already taken by * previous proc_autoc_read_82599. * * This part (82599) may need to hold a the SW/FW lock around all writes to * AUTOC. Likewise after a write we need to do a pipeline reset. **/ static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) { s32 ret_val = 0; /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) goto out; /* We only need to get the lock if: * - We didn't do it already (in the read part of a read-modify-write) * - LESM is enabled. */ if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) { ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); if (ret_val) return IXGBE_ERR_SWFW_SYNC; locked = true; } IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); ret_val = ixgbe_reset_pipeline_82599(hw); out: /* Free the SW/FW semaphore as we either grabbed it here or * already had it when this function was called. 
*/ if (locked) hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); return ret_val; } static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; ixgbe_init_mac_link_ops_82599(hw); mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); return 0; } /** * ixgbe_init_phy_ops_82599 - PHY/SFP specific init * @hw: pointer to hardware structure * * Initialize any function pointers that were not able to be * set during get_invariants because the PHY/SFP type was * not known. Perform the SFP init if necessary. * **/ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val; u32 esdp; if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { /* Store flag indicating I2C bus access control unit. */ hw->phy.qsfp_shared_i2c_bus = true; /* Initialize access to QSFP+ I2C bus */ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp |= IXGBE_ESDP_SDP0_DIR; esdp &= ~IXGBE_ESDP_SDP1_DIR; esdp &= ~IXGBE_ESDP_SDP0; esdp &= ~IXGBE_ESDP_SDP0_NATIVE; esdp &= ~IXGBE_ESDP_SDP1_NATIVE; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599; phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599; } /* Identify the PHY or SFP module */ ret_val = phy->ops.identify(hw); /* Setup function pointers based on detected SFP module and speeds */ ixgbe_init_mac_link_ops_82599(hw); /* If copper media, overwrite with copper function pointers */ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { mac->ops.setup_link = &ixgbe_setup_copper_link_82599; mac->ops.get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic; } /* Set necessary function pointers based on phy type */ switch (hw->phy.type) { case ixgbe_phy_tn: phy->ops.check_link = &ixgbe_check_phy_link_tnx; phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; break; default: break; } return ret_val; } /** * ixgbe_get_link_capabilities_82599 - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed * @autoneg: true when autoneg or autotry is enabled * * Determines the link capabilities by reading the AUTOC register. **/ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { u32 autoc = 0; /* Determine 1G link capabilities off of SFP+ type */ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { *speed = IXGBE_LINK_SPEED_1GB_FULL; *autoneg = true; return 0; } /* * Determine link capabilities based on the stored value of AUTOC, * which represents EEPROM defaults. If AUTOC value has not been * stored, use the current register value. 
*/ if (hw->mac.orig_link_settings_stored) autoc = hw->mac.orig_autoc; else autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); switch (autoc & IXGBE_AUTOC_LMS_MASK) { case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; *autoneg = false; break; case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_10GB_FULL; *autoneg = false; break; case IXGBE_AUTOC_LMS_1G_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; *autoneg = true; break; case IXGBE_AUTOC_LMS_10G_SERIAL: *speed = IXGBE_LINK_SPEED_10GB_FULL; *autoneg = false; break; case IXGBE_AUTOC_LMS_KX4_KX_KR: case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: *speed = IXGBE_LINK_SPEED_UNKNOWN; if (autoc & IXGBE_AUTOC_KR_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX4_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; *autoneg = true; break; case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: *speed = IXGBE_LINK_SPEED_100_FULL; if (autoc & IXGBE_AUTOC_KR_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX4_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; *autoneg = true; break; case IXGBE_AUTOC_LMS_SGMII_1G_100M: *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; *autoneg = false; break; default: return IXGBE_ERR_LINK_SETUP; } if (hw->phy.multispeed_fiber) { *speed |= IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; /* QSFP must not enable auto-negotiation */ if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp) *autoneg = false; else *autoneg = true; } return 0; } /** * ixgbe_get_media_type_82599 - Get media type * @hw: pointer to hardware structure * * Returns the media type (fiber, copper, backplane) **/ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) { /* Detect if there is a copper PHY attached. */ switch (hw->phy.type) { case ixgbe_phy_cu_unknown: case ixgbe_phy_tn: return ixgbe_media_type_copper; default: break; } switch (hw->device_id) { case IXGBE_DEV_ID_82599_KX4: case IXGBE_DEV_ID_82599_KX4_MEZZ: case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: case IXGBE_DEV_ID_82599_KR: case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: case IXGBE_DEV_ID_82599_XAUI_LOM: /* Default device ID is mezzanine card KX/KX4 */ return ixgbe_media_type_backplane; case IXGBE_DEV_ID_82599_SFP: case IXGBE_DEV_ID_82599_SFP_FCOE: case IXGBE_DEV_ID_82599_SFP_EM: case IXGBE_DEV_ID_82599_SFP_SF2: case IXGBE_DEV_ID_82599_SFP_SF_QP: case IXGBE_DEV_ID_82599EN_SFP: return ixgbe_media_type_fiber; case IXGBE_DEV_ID_82599_CX4: return ixgbe_media_type_cx4; case IXGBE_DEV_ID_82599_T3_LOM: return ixgbe_media_type_copper; case IXGBE_DEV_ID_82599_LS: return ixgbe_media_type_fiber_lco; case IXGBE_DEV_ID_82599_QSFP_SF_QP: return ixgbe_media_type_fiber_qsfp; default: return ixgbe_media_type_unknown; } } /** * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3 * @hw: pointer to hardware structure * * Disables link, should be called during D3 power down sequence. 
* **/ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) { u32 autoc2_reg; u16 ee_ctrl_2 = 0; hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); if (!ixgbe_mng_present(hw) && !hw->wol_enabled && ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); } } /** * ixgbe_start_mac_link_82599 - Setup MAC link settings * @hw: pointer to hardware structure * @autoneg_wait_to_complete: true when waiting for completion is needed * * Configures link settings based on values in the ixgbe_hw struct. * Restarts the link. Performs autonegotiation if needed. **/ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, bool autoneg_wait_to_complete) { u32 autoc_reg; u32 links_reg; u32 i; s32 status = 0; bool got_lock = false; if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); if (status) return status; got_lock = true; } /* Restart link */ ixgbe_reset_pipeline_82599(hw); if (got_lock) hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); /* Only poll for autoneg to complete if specified to do so */ if (autoneg_wait_to_complete) { autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_KX4_KX_KR || (autoc_reg & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || (autoc_reg & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { links_reg = 0; /* Just in case Autoneg time = 0 */ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_reg & IXGBE_LINKS_KX_AN_COMP) break; msleep(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; hw_dbg(hw, "Autoneg did not complete.\n"); } } } /* Add delay to filter out noises during initial link setup */ msleep(50); return status; } /** * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser * @hw: pointer to hardware structure * * The base drivers may require better control over SFP+ module * PHY states. This includes selectively shutting down the Tx * laser on the PHY, effectively halting physical link. **/ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) return; /* Disable tx laser; allow 100us to go dark per spec */ esdp_reg |= IXGBE_ESDP_SDP3; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); udelay(100); } /** * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser * @hw: pointer to hardware structure * * The base drivers may require better control over SFP+ module * PHY states. This includes selectively turning on the Tx * laser on the PHY, effectively starting physical link. **/ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); /* Enable tx laser; allow 100ms to light up */ esdp_reg &= ~IXGBE_ESDP_SDP3; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); msleep(100); } /** * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser * @hw: pointer to hardware structure * * When the driver changes the link speeds that it can support, * it sets autotry_restart to true to indicate that we need to * initiate a new autotry session with the link partner. 
To do * so, we set the speed then disable and re-enable the tx laser, to * alert the link partner that it also needs to restart autotry on its * end. This is consistent with true clause 37 autoneg, which also * involves a loss of signal. **/ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) return; if (hw->mac.autotry_restart) { ixgbe_disable_tx_laser_multispeed_fiber(hw); ixgbe_enable_tx_laser_multispeed_fiber(hw); hw->mac.autotry_restart = false; } } /** * ixgbe_set_hard_rate_select_speed - Set module link speed * @hw: pointer to hardware structure * @speed: link speed to set * * Set module link speed via RS0/RS1 rate select pins. */ static void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); switch (speed) { case IXGBE_LINK_SPEED_10GB_FULL: esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); break; case IXGBE_LINK_SPEED_1GB_FULL: esdp_reg &= ~IXGBE_ESDP_SDP5; esdp_reg |= IXGBE_ESDP_SDP5_DIR; break; default: hw_dbg(hw, "Invalid fixed module speed\n"); return; } IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed * @hw: pointer to hardware structure * @speed: new link speed * @autoneg_wait_to_complete: true when waiting for completion is needed * * Implements the Intel SmartSpeed algorithm. **/ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { s32 status = 0; ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; s32 i, j; bool link_up = false; u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* Set autoneg_advertised value based on input link speed */ hw->phy.autoneg_advertised = 0; if (speed & IXGBE_LINK_SPEED_10GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; if (speed & IXGBE_LINK_SPEED_1GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; if (speed & IXGBE_LINK_SPEED_100_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; /* * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the * autoneg advertisement if link is unable to be established at the * highest negotiated rate. This can sometimes happen due to integrity * issues with the physical media connection. */ /* First, try to get link with full advertisement */ hw->phy.smart_speed_active = false; for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { status = ixgbe_setup_mac_link_82599(hw, speed, autoneg_wait_to_complete); if (status != 0) goto out; /* * Wait for the controller to acquire link. Per IEEE 802.3ap, * Section 73.10.2, we may have to wait up to 500ms if KR is * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per * Table 9 in the AN MAS. */ for (i = 0; i < 5; i++) { mdelay(100); /* If we have link, just jump out */ status = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (status != 0) goto out; if (link_up) goto out; } } /* * We didn't get link. If we advertised KR plus one of KX4/KX * (or BX4/BX), then disable KR and try again. */ if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) goto out; /* Turn SmartSpeed on to disable KR support */ hw->phy.smart_speed_active = true; status = ixgbe_setup_mac_link_82599(hw, speed, autoneg_wait_to_complete); if (status != 0) goto out; /* * Wait for the controller to acquire link. 
600ms will allow for * the AN link_fail_inhibit_timer as well for multiple cycles of * parallel detect, both 10g and 1g. This allows for the maximum * connect attempts as defined in the AN MAS table 73-7. */ for (i = 0; i < 6; i++) { mdelay(100); /* If we have link, just jump out */ status = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (status != 0) goto out; if (link_up) goto out; } /* We didn't get link. Turn SmartSpeed back off. */ hw->phy.smart_speed_active = false; status = ixgbe_setup_mac_link_82599(hw, speed, autoneg_wait_to_complete); out: if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n"); return status; } /** * ixgbe_setup_mac_link_82599 - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the AUTOC register and restarts link. **/ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { bool autoneg = false; s32 status; u32 pma_pmd_1g, link_mode, links_reg, i; u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; /* holds the value of AUTOC register at this current point in time */ u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the cached value of AUTOC register */ u32 orig_autoc = 0; /* temporary variable used for comparison purposes */ u32 autoc = current_autoc; /* Check to see if speed passed in is supported. */ status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg); if (status) return status; speed &= link_capabilities; if (speed == IXGBE_LINK_SPEED_UNKNOWN) return IXGBE_ERR_LINK_SETUP; /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ if (hw->mac.orig_link_settings_stored) orig_autoc = hw->mac.orig_autoc; else orig_autoc = autoc; link_mode = autoc & IXGBE_AUTOC_LMS_MASK; pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { /* Set KX4/KX/KR support according to speed requested */ autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); if (speed & IXGBE_LINK_SPEED_10GB_FULL) { if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) autoc |= IXGBE_AUTOC_KX4_SUPP; if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && (hw->phy.smart_speed_active == false)) autoc |= IXGBE_AUTOC_KR_SUPP; } if (speed & IXGBE_LINK_SPEED_1GB_FULL) autoc |= IXGBE_AUTOC_KX_SUPP; } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || link_mode == IXGBE_AUTOC_LMS_1G_AN)) { /* Switch from 1G SFI to 10G SFI if requested */ if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { autoc &= ~IXGBE_AUTOC_LMS_MASK; autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; } } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { /* Switch from 10G SFI to 1G SFI if requested */ if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { autoc &= ~IXGBE_AUTOC_LMS_MASK; if (autoneg) autoc |= IXGBE_AUTOC_LMS_1G_AN; else autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; } } if (autoc != current_autoc) { /* Restart link */ status = hw->mac.ops.prot_autoc_write(hw, autoc, false); if (status) return status; /* Only 
poll for autoneg to complete if specified to do so */ if (autoneg_wait_to_complete) { if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { links_reg = 0; /*Just in case Autoneg time=0*/ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_reg & IXGBE_LINKS_KX_AN_COMP) break; msleep(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; hw_dbg(hw, "Autoneg did not complete.\n"); } } } /* Add delay to filter out noises during initial link setup */ msleep(50); } return status; } /** * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field * @hw: pointer to hardware structure * @speed: new link speed * @autoneg_wait_to_complete: true if waiting is needed to complete * * Restarts link on PHY and MAC based on settings passed in. **/ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { s32 status; /* Setup the PHY according to input speed */ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); /* Set up MAC */ ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); return status; } /** * ixgbe_reset_hw_82599 - Perform hardware reset * @hw: pointer to hardware structure * * Resets the hardware by resetting the transmit and receive units, masks * and clears all interrupts, perform a PHY reset, and perform a link (MAC) * reset. **/ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) { ixgbe_link_speed link_speed; s32 status; u32 ctrl, i, autoc, autoc2; u32 curr_lms; bool link_up = false; /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); if (status) return status; /* flush pending Tx transactions */ ixgbe_clear_tx_pending(hw); /* PHY ops must be identified and initialized prior to reset */ /* Identify PHY and related function pointers */ status = hw->phy.ops.init(hw); if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) return status; /* Setup SFP module if there is one present. */ if (hw->phy.sfp_setup_needed) { status = hw->mac.ops.setup_sfp(hw); hw->phy.sfp_setup_needed = false; } if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) return status; /* Reset PHY */ if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) hw->phy.ops.reset(hw); /* remember AUTOC from before we reset */ curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK; mac_reset_top: /* * Issue global reset to the MAC. Needs to be SW reset if link is up. * If link reset is used when link is up, it might reset the PHY when * mng is using it. If link is down or the flag to force full link * reset is set, then perform link reset. */ ctrl = IXGBE_CTRL_LNK_RST; if (!hw->force_full_reset) { hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (link_up) ctrl = IXGBE_CTRL_RST; } ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; udelay(1); } if (ctrl & IXGBE_CTRL_RST_MASK) { status = IXGBE_ERR_RESET_FAILED; hw_dbg(hw, "Reset polling failed to complete.\n"); } msleep(50); /* * Double resets are required for recovery from certain error * conditions. Between resets, it is necessary to stall to allow time * for any pending HW events to complete. 
*/ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; goto mac_reset_top; } /* * Store the original AUTOC/AUTOC2 values if they have not been * stored off yet. Otherwise restore the stored original * values since the reset operation sets back to defaults. */ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); /* Enable link if disabled in NVM */ if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) { autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); IXGBE_WRITE_FLUSH(hw); } if (hw->mac.orig_link_settings_stored == false) { hw->mac.orig_autoc = autoc; hw->mac.orig_autoc2 = autoc2; hw->mac.orig_link_settings_stored = true; } else { /* If MNG FW is running on a multi-speed device that * doesn't autoneg with out driver support we need to * leave LMS in the state it was before we MAC reset. * Likewise if we support WoL we don't want change the * LMS state either. */ if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) || hw->wol_enabled) hw->mac.orig_autoc = (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) | curr_lms; if (autoc != hw->mac.orig_autoc) { status = hw->mac.ops.prot_autoc_write(hw, hw->mac.orig_autoc, false); if (status) return status; } if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; autoc2 |= (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK); IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); } } /* Store the permanent mac address */ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); /* * Store MAC address from RAR0, clear receive address registers, and * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address. */ hw->mac.num_rar_entries = IXGBE_82599_RAR_ENTRIES; hw->mac.ops.init_rx_addrs(hw); /* Store the permanent SAN mac address */ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); /* Add the SAN MAC address to the RAR only if it's a valid address */ if (is_valid_ether_addr(hw->mac.san_addr)) { /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, hw->mac.san_addr, 0, IXGBE_RAH_AV); /* clear VMDq pool/queue selection for this RAR */ hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, IXGBE_CLEAR_VMDQ_ALL); /* Reserve the last RAR for the SAN MAC address */ hw->mac.num_rar_entries--; } /* Store the alternative WWNN/WWPN prefix */ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, &hw->mac.wwpn_prefix); return status; } /** * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete * @hw: pointer to hardware structure * @fdircmd: current value of FDIRCMD register */ static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) { int i; for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK)) return 0; udelay(10); } return IXGBE_ERR_FDIR_CMD_INCOMPLETE; } /** * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. * @hw: pointer to hardware structure **/ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) { int i; u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); u32 fdircmd; s32 err; fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; /* * Before starting reinitialization process, * FDIRCMD.CMD must be zero. 
*/ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); if (err) { hw_dbg(hw, "Flow Director previous command did not complete, aborting table re-initialization.\n"); return err; } IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); IXGBE_WRITE_FLUSH(hw); /* * 82599 adapters flow director init flow cannot be restarted, * Workaround 82599 silicon errata by performing the following steps * before re-writing the FDIRCTRL control register with the same value. * - write 1 to bit 8 of FDIRCMD register & * - write 0 to bit 8 of FDIRCMD register */ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | IXGBE_FDIRCMD_CLEARHT)); IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & ~IXGBE_FDIRCMD_CLEARHT)); IXGBE_WRITE_FLUSH(hw); /* * Clear FDIR Hash register to clear any leftover hashes * waiting to be programmed. */ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00); IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); IXGBE_WRITE_FLUSH(hw); /* Poll init-done after we write FDIRCTRL register */ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & IXGBE_FDIRCTRL_INIT_DONE) break; usleep_range(1000, 2000); } if (i >= IXGBE_FDIR_INIT_DONE_POLL) { hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); return IXGBE_ERR_FDIR_REINIT_FAILED; } /* Clear FDIR statistics registers (read to clear) */ IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); IXGBE_READ_REG(hw, IXGBE_FDIRMISS); IXGBE_READ_REG(hw, IXGBE_FDIRLEN); return 0; } /** * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers * @hw: pointer to hardware structure * @fdirctrl: value to write to flow director control register **/ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) { int i; /* Prime the keys for hashing */ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); /* * Poll init-done after we write the register. Estimated times: * 10G: PBALLOC = 11b, timing is 60us * 1G: PBALLOC = 11b, timing is 600us * 100M: PBALLOC = 11b, timing is 6ms * * Multiple these timings by 4 if under full Rx load * * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for * 1 msec per poll time. If we're at line rate and drop to 100M, then * this might not finish in our poll time, but we can live with that * for now. 
*/ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); IXGBE_WRITE_FLUSH(hw); for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & IXGBE_FDIRCTRL_INIT_DONE) break; usleep_range(1000, 2000); } if (i >= IXGBE_FDIR_INIT_DONE_POLL) hw_dbg(hw, "Flow Director poll time exceeded!\n"); } /** * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters * @hw: pointer to hardware structure * @fdirctrl: value to write to flow director control register, initially * contains just the value of the Rx packet buffer allocation **/ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) { /* * Continue setup of fdirctrl register bits: * Move the flexible bytes to use the ethertype - shift 6 words * Set the maximum length per hash bucket to 0xA filters * Send interrupt when 64 filters are left */ fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); /* write hashes and fdirctrl register, poll for completion */ ixgbe_fdir_enable_82599(hw, fdirctrl); return 0; } /** * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters * @hw: pointer to hardware structure * @fdirctrl: value to write to flow director control register, initially * contains just the value of the Rx packet buffer allocation **/ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) { /* * Continue setup of fdirctrl register bits: * Turn perfect match filtering on * Initialize the drop queue * Move the flexible bytes to use the ethertype - shift 6 words * Set the maximum length per hash bucket to 0xA filters * Send interrupt when 64 (0x4 * 16) filters are left */ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); /* write hashes and fdirctrl register, poll for completion */ ixgbe_fdir_enable_82599(hw, fdirctrl); return 0; } /* * These defines allow us to quickly generate all of the necessary instructions * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION * for values 0 through 15 */ #define IXGBE_ATR_COMMON_HASH_KEY \ (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ do { \ u32 n = (_n); \ if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \ common_hash ^= lo_hash_dword >> n; \ else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \ bucket_hash ^= lo_hash_dword >> n; \ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \ sig_hash ^= lo_hash_dword << (16 - n); \ if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \ common_hash ^= hi_hash_dword >> n; \ else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \ bucket_hash ^= hi_hash_dword >> n; \ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \ sig_hash ^= hi_hash_dword << (16 - n); \ } while (0) /** * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash * @input: input bitstream to compute the hash on * @common: compressed common input dword * * This function is almost identical to the function above but contains * several optimizations such as unwinding all of the loops, letting the * compiler work out all of the conditional ifs since the keys are static * defines, and computing two keys at once since the hashed dword stream * will be the same for both keys. 
**/ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, union ixgbe_atr_hash_dword common) { u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; /* record the flow_vm_vlan bits as they are a key part to the hash */ flow_vm_vlan = ntohl(input.dword); /* generate common hash dword */ hi_hash_dword = ntohl(common.dword); /* low dword is word swapped version of common */ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); /* apply flow ID/VM pool/VLAN ID bits to hash words */ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); /* Process bits 0 and 16 */ IXGBE_COMPUTE_SIG_HASH_ITERATION(0); /* * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to * delay this because bit 0 of the stream should not be processed * so we do not add the vlan until after bit 0 was processed */ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); /* Process remaining 30 bit of the key */ IXGBE_COMPUTE_SIG_HASH_ITERATION(1); IXGBE_COMPUTE_SIG_HASH_ITERATION(2); IXGBE_COMPUTE_SIG_HASH_ITERATION(3); IXGBE_COMPUTE_SIG_HASH_ITERATION(4); IXGBE_COMPUTE_SIG_HASH_ITERATION(5); IXGBE_COMPUTE_SIG_HASH_ITERATION(6); IXGBE_COMPUTE_SIG_HASH_ITERATION(7); IXGBE_COMPUTE_SIG_HASH_ITERATION(8); IXGBE_COMPUTE_SIG_HASH_ITERATION(9); IXGBE_COMPUTE_SIG_HASH_ITERATION(10); IXGBE_COMPUTE_SIG_HASH_ITERATION(11); IXGBE_COMPUTE_SIG_HASH_ITERATION(12); IXGBE_COMPUTE_SIG_HASH_ITERATION(13); IXGBE_COMPUTE_SIG_HASH_ITERATION(14); IXGBE_COMPUTE_SIG_HASH_ITERATION(15); /* combine common_hash result with signature and bucket hashes */ bucket_hash ^= common_hash; bucket_hash &= IXGBE_ATR_HASH_MASK; sig_hash ^= common_hash << 16; sig_hash &= IXGBE_ATR_HASH_MASK << 16; /* return completed signature hash */ return sig_hash ^ bucket_hash; } /** * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter * @hw: pointer to hardware structure * @input: unique input dword * @common: compressed common input dword * @queue: queue index to direct traffic to * * Note that the tunnel bit in input must not be set when the hardware * tunneling support does not exist. **/ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_hash_dword input, union ixgbe_atr_hash_dword common, u8 queue) { u64 fdirhashcmd; u8 flow_type; bool tunnel; u32 fdircmd; /* * Get the flow_type in order to program FDIRCMD properly * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */ tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK); flow_type = input.formatted.flow_type & (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1); switch (flow_type) { case IXGBE_ATR_FLOW_TYPE_TCPV4: case IXGBE_ATR_FLOW_TYPE_UDPV4: case IXGBE_ATR_FLOW_TYPE_SCTPV4: case IXGBE_ATR_FLOW_TYPE_TCPV6: case IXGBE_ATR_FLOW_TYPE_UDPV6: case IXGBE_ATR_FLOW_TYPE_SCTPV6: break; default: hw_dbg(hw, " Error on flow type input\n"); return IXGBE_ERR_CONFIG; } /* configure FDIRCMD register */ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; if (tunnel) fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; /* * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. 
*/ fdirhashcmd = (u64)fdircmd << 32; fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); return 0; } #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ do { \ u32 n = (_n); \ if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \ bucket_hash ^= lo_hash_dword >> n; \ if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \ bucket_hash ^= hi_hash_dword >> n; \ } while (0) /** * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash * @input: input bitstream to compute the hash on * @input_mask: mask for the input bitstream * * This function serves two main purposes. First it applies the input_mask * to the atr_input resulting in a cleaned up atr_input data stream. * Secondly it computes the hash and stores it in the bkt_hash field at * the end of the input byte stream. This way it will be available for * future use without needing to recompute the hash. **/ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, union ixgbe_atr_input *input_mask) { u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; u32 bucket_hash = 0; __be32 hi_dword = 0; int i; /* Apply masks to input data */ for (i = 0; i <= 10; i++) input->dword_stream[i] &= input_mask->dword_stream[i]; /* record the flow_vm_vlan bits as they are a key part to the hash */ flow_vm_vlan = ntohl(input->dword_stream[0]); /* generate common hash dword */ for (i = 1; i <= 10; i++) hi_dword ^= input->dword_stream[i]; hi_hash_dword = ntohl(hi_dword); /* low dword is word swapped version of common */ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); /* apply flow ID/VM pool/VLAN ID bits to hash words */ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); /* Process bits 0 and 16 */ IXGBE_COMPUTE_BKT_HASH_ITERATION(0); /* * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to * delay this because bit 0 of the stream should not be processed * so we do not add the vlan until after bit 0 was processed */ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); /* Process remaining 30 bit of the key */ for (i = 1; i <= 15; i++) IXGBE_COMPUTE_BKT_HASH_ITERATION(i); /* * Limit hash to 13 bits since max bucket count is 8K. * Store result at the end of the input stream. */ input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF); } /** * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks * @input_mask: mask to be bit swapped * * The source and destination port masks for flow director are bit swapped * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to * generate a correctly swapped value we need to bit swap the mask and that * is what is accomplished by this function. **/ static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) { u32 mask = ntohs(input_mask->formatted.dst_port); mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; mask |= ntohs(input_mask->formatted.src_port); mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); } /* * These two macros are meant to address the fact that we have registers * that are either all or in part big-endian. As a result on big-endian * systems we will end up byte swapping the value to little-endian before * it is byte swapped again and written to the hardware in the original * big-endian format. 
*/ #define IXGBE_STORE_AS_BE32(_value) \ (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) #define IXGBE_WRITE_REG_BE32(a, reg, value) \ IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value))) #define IXGBE_STORE_AS_BE16(_value) __swab16(ntohs((_value))) s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input_mask) { /* mask IPv6 since it is currently not supported */ u32 fdirm = IXGBE_FDIRM_DIPv6; u32 fdirtcpm; /* * Program the relevant mask registers. If src/dst_port or src/dst_addr * are zero, then assume a full mask for that field. Also assume that * a VLAN of 0 is unspecified, so mask that out as well. L4type * cannot be masked out in this implementation. * * This also assumes IPv4 only. IPv6 masking isn't supported at this * point in time. */ /* verify bucket hash is cleared on hash generation */ if (input_mask->formatted.bkt_hash) hw_dbg(hw, " bucket hash should always be 0 in mask\n"); /* Program FDIRM and verify partial masks */ switch (input_mask->formatted.vm_pool & 0x7F) { case 0x0: fdirm |= IXGBE_FDIRM_POOL; break; case 0x7F: break; default: hw_dbg(hw, " Error on vm pool mask\n"); return IXGBE_ERR_CONFIG; } switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) { case 0x0: fdirm |= IXGBE_FDIRM_L4P; if (input_mask->formatted.dst_port || input_mask->formatted.src_port) { hw_dbg(hw, " Error on src/dst port mask\n"); return IXGBE_ERR_CONFIG; } break; case IXGBE_ATR_L4TYPE_MASK: break; default: hw_dbg(hw, " Error on flow type mask\n"); return IXGBE_ERR_CONFIG; } switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) { case 0x0000: /* mask VLAN ID */ fdirm |= IXGBE_FDIRM_VLANID; fallthrough; case 0x0FFF: /* mask VLAN priority */ fdirm |= IXGBE_FDIRM_VLANP; break; case 0xE000: /* mask VLAN ID only */ fdirm |= IXGBE_FDIRM_VLANID; fallthrough; case 0xEFFF: /* no VLAN fields masked */ break; default: hw_dbg(hw, " Error on VLAN mask\n"); return IXGBE_ERR_CONFIG; } switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) { case 0x0000: /* Mask Flex Bytes */ fdirm |= IXGBE_FDIRM_FLEX; fallthrough; case 0xFFFF: break; default: hw_dbg(hw, " Error on flexible byte mask\n"); return IXGBE_ERR_CONFIG; } /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); /* store the TCP/UDP port masks, bit reversed from port layout */ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); /* write both the same so that UDP and TCP use the same mask */ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); /* also use it for SCTP */ switch (hw->mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); break; default: break; } /* store source and destination IP masks (big-enian) */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, ~input_mask->formatted.src_ip[0]); IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, ~input_mask->formatted.dst_ip[0]); return 0; } s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, u16 soft_id, u8 queue) { u32 fdirport, fdirvlan, fdirhash, fdircmd; s32 err; /* currently IPv6 is not supported, must be programmed with 0 */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), input->formatted.src_ip[0]); IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), input->formatted.src_ip[1]); IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.src_ip[2]); /* record the source address 
(big-endian) */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); /* record the first 32 bits of the destination address (big-endian) */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); /* record source and destination port (little-endian)*/ fdirport = be16_to_cpu(input->formatted.dst_port); fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; fdirport |= be16_to_cpu(input->formatted.src_port); IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); /* record vlan (little-endian) and flex_bytes(big-endian) */ fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; fdirvlan |= ntohs(input->formatted.vlan_id); IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); /* configure FDIRHASH register */ fdirhash = (__force u32)input->formatted.bkt_hash; fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); /* * flush all previous writes to make certain registers are * programmed prior to issuing the command */ IXGBE_WRITE_FLUSH(hw); /* configure FDIRCMD register */ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; if (queue == IXGBE_FDIR_DROP_QUEUE) fdircmd |= IXGBE_FDIRCMD_DROP; fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); if (err) { hw_dbg(hw, "Flow Director command did not complete!\n"); return err; } return 0; } s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, u16 soft_id) { u32 fdirhash; u32 fdircmd; s32 err; /* configure FDIRHASH register */ fdirhash = (__force u32)input->formatted.bkt_hash; fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); /* flush hash to HW */ IXGBE_WRITE_FLUSH(hw); /* Query if filter is present */ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); if (err) { hw_dbg(hw, "Flow Director command did not complete!\n"); return err; } /* if filter exists in hardware then remove it */ if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_REMOVE_FLOW); } return 0; } /** * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register * @hw: pointer to hardware structure * @reg: analog register to read * @val: read value * * Performs read operation to Omer analog register specified. **/ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) { u32 core_ctl; IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | (reg << 8)); IXGBE_WRITE_FLUSH(hw); udelay(10); core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); *val = (u8)core_ctl; return 0; } /** * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register * @hw: pointer to hardware structure * @reg: atlas register to write * @val: value to write * * Performs write operation to Omer analog register specified. 
**/ static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) { u32 core_ctl; core_ctl = (reg << 8) | val; IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); IXGBE_WRITE_FLUSH(hw); udelay(10); return 0; } /** * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure * * Starts the hardware using the generic start_hw function * and the generation start_hw function. * Then performs revision-specific operations, if any. **/ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) { s32 ret_val = 0; ret_val = ixgbe_start_hw_generic(hw); if (ret_val) return ret_val; ret_val = ixgbe_start_hw_gen2(hw); if (ret_val) return ret_val; /* We need to run link autotry after the driver loads */ hw->mac.autotry_restart = true; return ixgbe_verify_fw_version_82599(hw); } /** * ixgbe_identify_phy_82599 - Get physical layer module * @hw: pointer to hardware structure * * Determines the physical layer module found on the current adapter. * If PHY already detected, maintains current PHY type in hw struct, * otherwise executes the PHY detection routine. **/ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) { s32 status; /* Detect PHY if not unknown - returns success if already detected. */ status = ixgbe_identify_phy_generic(hw); if (status) { /* 82599 10GBASE-T requires an external PHY */ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) return status; status = ixgbe_identify_module_generic(hw); } /* Set PHY type none if no PHY detected */ if (hw->phy.type == ixgbe_phy_unknown) { hw->phy.type = ixgbe_phy_none; status = 0; } /* Return error if SFP module has been detected but is not supported */ if (hw->phy.type == ixgbe_phy_sfp_unsupported) return IXGBE_ERR_SFP_NOT_SUPPORTED; return status; } /** * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 * @hw: pointer to hardware structure * @regval: register value to write to RXCTRL * * Enables the Rx DMA unit for 82599 **/ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) { /* * Workaround for 82599 silicon errata when enabling the Rx datapath. * If traffic is incoming before we enable the Rx unit, it could hang * the Rx DMA unit. Therefore, make sure the security engine is * completely disabled prior to enabling the Rx unit. */ hw->mac.ops.disable_rx_buff(hw); if (regval & IXGBE_RXCTRL_RXEN) hw->mac.ops.enable_rx(hw); else hw->mac.ops.disable_rx(hw); hw->mac.ops.enable_rx_buff(hw); return 0; } /** * ixgbe_verify_fw_version_82599 - verify fw version for 82599 * @hw: pointer to hardware structure * * Verifies that installed the firmware version is 0.6 or higher * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. * * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or * if the FW version is not supported. 
**/ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_EEPROM_VERSION; u16 fw_offset, fw_ptp_cfg_offset; u16 offset; u16 fw_version = 0; /* firmware check is only necessary for SFI devices */ if (hw->phy.media_type != ixgbe_media_type_fiber) return 0; /* get the offset to the Firmware Module block */ offset = IXGBE_FW_PTR; if (hw->eeprom.ops.read(hw, offset, &fw_offset)) goto fw_version_err; if (fw_offset == 0 || fw_offset == 0xFFFF) return IXGBE_ERR_EEPROM_VERSION; /* get the offset to the Pass Through Patch Configuration block */ offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR; if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset)) goto fw_version_err; if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF) return IXGBE_ERR_EEPROM_VERSION; /* get the firmware version */ offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4; if (hw->eeprom.ops.read(hw, offset, &fw_version)) goto fw_version_err; if (fw_version > 0x5) status = 0; return status; fw_version_err: hw_err(hw, "eeprom read at offset %d failed\n", offset); return IXGBE_ERR_EEPROM_VERSION; } /** * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. * @hw: pointer to hardware structure * * Returns true if the LESM FW module is present and enabled. Otherwise * returns false. Smart Speed must be disabled if LESM FW module is enabled. **/ static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) { u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; s32 status; /* get the offset to the Firmware Module block */ status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); if (status || fw_offset == 0 || fw_offset == 0xFFFF) return false; /* get the offset to the LESM Parameters block */ status = hw->eeprom.ops.read(hw, (fw_offset + IXGBE_FW_LESM_PARAMETERS_PTR), &fw_lesm_param_offset); if (status || fw_lesm_param_offset == 0 || fw_lesm_param_offset == 0xFFFF) return false; /* get the lesm state word */ status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + IXGBE_FW_LESM_STATE_1), &fw_lesm_state); if (!status && (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) return true; return false; } /** * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using * fastest available method * * @hw: pointer to hardware structure * @offset: offset of word in EEPROM to read * @words: number of words * @data: word(s) read from the EEPROM * * Retrieves 16 bit word(s) read from EEPROM **/ static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; /* If EEPROM is detected and can be addressed using 14 bits, * use EERD otherwise use bit bang */ if (eeprom->type == ixgbe_eeprom_spi && offset + (words - 1) <= IXGBE_EERD_MAX_ADDR) return ixgbe_read_eerd_buffer_generic(hw, offset, words, data); return ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, words, data); } /** * ixgbe_read_eeprom_82599 - Read EEPROM word using * fastest available method * * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM **/ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, u16 offset, u16 *data) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; /* * If EEPROM is detected and can be addressed using 14 bits, * use EERD otherwise use bit bang */ if (eeprom->type == ixgbe_eeprom_spi && offset <= IXGBE_EERD_MAX_ADDR) return ixgbe_read_eerd_generic(hw, offset, data); return ixgbe_read_eeprom_bit_bang_generic(hw, 
offset, data); } /** * ixgbe_reset_pipeline_82599 - perform pipeline reset * * @hw: pointer to hardware structure * * Reset pipeline by asserting Restart_AN together with LMS change to ensure * full pipeline reset. Note - We must hold the SW/FW semaphore before writing * to AUTOC, so this function assumes the semaphore is held. **/ static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) { s32 ret_val; u32 anlp1_reg = 0; u32 i, autoc_reg, autoc2_reg; /* Enable link if disabled in NVM */ autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) { autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); IXGBE_WRITE_FLUSH(hw); } autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); autoc_reg |= IXGBE_AUTOC_AN_RESTART; /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); /* Wait for AN to leave state 0 */ for (i = 0; i < 10; i++) { usleep_range(4000, 8000); anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) break; } if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) { hw_dbg(hw, "auto negotiation not completed\n"); ret_val = IXGBE_ERR_RESET_FAILED; goto reset_pipeline_out; } ret_val = 0; reset_pipeline_out: /* Write AUTOC register with original LMS field and Restart_AN */ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); IXGBE_WRITE_FLUSH(hw); return ret_val; } /** * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read * @dev_addr: address to read from * @data: value read * * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. **/ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { u32 esdp; s32 status; s32 timeout = 200; if (hw->phy.qsfp_shared_i2c_bus == true) { /* Acquire I2C bus ownership. */ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp |= IXGBE_ESDP_SDP0; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); while (timeout) { esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); if (esdp & IXGBE_ESDP_SDP1) break; usleep_range(5000, 10000); timeout--; } if (!timeout) { hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); status = IXGBE_ERR_I2C; goto release_i2c_access; } } status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data); release_i2c_access: if (hw->phy.qsfp_shared_i2c_bus == true) { /* Release I2C bus ownership. */ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp &= ~IXGBE_ESDP_SDP0; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); } return status; } /** * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write * @dev_addr: address to write to * @data: value to write * * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. **/ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { u32 esdp; s32 status; s32 timeout = 200; if (hw->phy.qsfp_shared_i2c_bus == true) { /* Acquire I2C bus ownership. 
*/ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp |= IXGBE_ESDP_SDP0; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); while (timeout) { esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); if (esdp & IXGBE_ESDP_SDP1) break; usleep_range(5000, 10000); timeout--; } if (!timeout) { hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); status = IXGBE_ERR_I2C; goto release_i2c_access; } } status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data); release_i2c_access: if (hw->phy.qsfp_shared_i2c_bus == true) { /* Release I2C bus ownership. */ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp &= ~IXGBE_ESDP_SDP0; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); } return status; } static const struct ixgbe_mac_operations mac_ops_82599 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82599, .start_hw = &ixgbe_start_hw_82599, .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, .get_media_type = &ixgbe_get_media_type_82599, .enable_rx_dma = &ixgbe_enable_rx_dma_82599, .disable_rx_buff = &ixgbe_disable_rx_buff_generic, .enable_rx_buff = &ixgbe_enable_rx_buff_generic, .get_mac_addr = &ixgbe_get_mac_addr_generic, .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, .get_device_caps = &ixgbe_get_device_caps_generic, .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, .stop_adapter = &ixgbe_stop_adapter_generic, .get_bus_info = &ixgbe_get_bus_info_generic, .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, .read_analog_reg8 = &ixgbe_read_analog_reg8_82599, .write_analog_reg8 = &ixgbe_write_analog_reg8_82599, .stop_link_on_d3 = &ixgbe_stop_mac_link_on_d3_82599, .setup_link = &ixgbe_setup_mac_link_82599, .set_rxpba = &ixgbe_set_rxpba_generic, .check_link = &ixgbe_check_mac_link_generic, .get_link_capabilities = &ixgbe_get_link_capabilities_82599, .led_on = &ixgbe_led_on_generic, .led_off = &ixgbe_led_off_generic, .init_led_link_act = ixgbe_init_led_link_act_generic, .blink_led_start = &ixgbe_blink_led_start_generic, .blink_led_stop = &ixgbe_blink_led_stop_generic, .set_rar = &ixgbe_set_rar_generic, .clear_rar = &ixgbe_clear_rar_generic, .set_vmdq = &ixgbe_set_vmdq_generic, .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, .clear_vmdq = &ixgbe_clear_vmdq_generic, .init_rx_addrs = &ixgbe_init_rx_addrs_generic, .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, .enable_mc = &ixgbe_enable_mc_generic, .disable_mc = &ixgbe_disable_mc_generic, .clear_vfta = &ixgbe_clear_vfta_generic, .set_vfta = &ixgbe_set_vfta_generic, .fc_enable = &ixgbe_fc_enable_generic, .setup_fc = ixgbe_setup_fc_generic, .fc_autoneg = ixgbe_fc_autoneg, .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, .init_uta_tables = &ixgbe_init_uta_tables_generic, .setup_sfp = &ixgbe_setup_sfp_modules_82599, .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, .release_swfw_sync = &ixgbe_release_swfw_sync, .init_swfw_sync = NULL, .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic, .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, .prot_autoc_read = &prot_autoc_read_82599, .prot_autoc_write = &prot_autoc_write_82599, .enable_rx = &ixgbe_enable_rx_generic, .disable_rx = &ixgbe_disable_rx_generic, }; static const struct ixgbe_eeprom_operations eeprom_ops_82599 = { .init_params = &ixgbe_init_eeprom_params_generic, .read = &ixgbe_read_eeprom_82599, .read_buffer = &ixgbe_read_eeprom_buffer_82599, .write = &ixgbe_write_eeprom_generic, .write_buffer = 
&ixgbe_write_eeprom_buffer_bit_bang_generic, .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, .update_checksum = &ixgbe_update_eeprom_checksum_generic, }; static const struct ixgbe_phy_operations phy_ops_82599 = { .identify = &ixgbe_identify_phy_82599, .identify_sfp = &ixgbe_identify_module_generic, .init = &ixgbe_init_phy_ops_82599, .reset = &ixgbe_reset_phy_generic, .read_reg = &ixgbe_read_phy_reg_generic, .write_reg = &ixgbe_write_phy_reg_generic, .setup_link = &ixgbe_setup_phy_link_generic, .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, .read_i2c_byte = &ixgbe_read_i2c_byte_generic, .write_i2c_byte = &ixgbe_write_i2c_byte_generic, .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, .check_overtemp = &ixgbe_tn_check_overtemp, }; const struct ixgbe_info ixgbe_82599_info = { .mac = ixgbe_mac_82599EB, .get_invariants = &ixgbe_get_invariants_82599, .mac_ops = &mac_ops_82599, .eeprom_ops = &eeprom_ops_82599, .phy_ops = &phy_ops_82599, .mbx_ops = &mbx_ops_generic, .mvals = ixgbe_mvals_8259X, };
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
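A short, hedged usage sketch of the Flow Director helpers in the file above: the input mask is programmed once for the whole perfect-filter table, and individual filters are then written against it. The helper name, the port number, the soft_id and the queue below are illustrative assumptions, not code from the driver; the constants are assumed to come from ixgbe.h/ixgbe_type.h.

/* Hypothetical example, not part of the driver: steer TCP/IPv4 traffic with
 * destination port 80 to a chosen Rx queue using the 82599 Flow Director
 * routines defined above. Port, soft_id and queue are arbitrary choices.
 */
static s32 example_fdir_steer_http(struct ixgbe_hw *hw, u8 rx_queue)
{
	union ixgbe_atr_input mask, filter;
	s32 err;

	memset(&mask, 0, sizeof(mask));
	memset(&filter, 0, sizeof(filter));

	/* Compare the L4 type and the destination port; every field left at
	 * zero in the mask is treated as fully masked by
	 * ixgbe_fdir_set_input_mask_82599().
	 */
	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_MASK;
	mask.formatted.dst_port = htons(0xFFFF);

	err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
	if (err)
		return err;

	filter.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
	filter.formatted.dst_port = htons(80);

	/* soft_id 1 is an arbitrary software handle; the same input and
	 * soft_id would later be passed to
	 * ixgbe_fdir_erase_perfect_filter_82599() to remove the filter.
	 */
	return ixgbe_fdir_write_perfect_filter_82599(hw, &filter, 1, rx_queue);
}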
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "ixgbe.h" #include "ixgbe_type.h" #include "ixgbe_dcb.h" #include "ixgbe_dcb_82599.h" /** * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter * @hw: pointer to hardware structure * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @prio_type: priority type indexed by traffic class * @prio_tc: priority to tc assignments indexed by priority * * Configure Rx Packet Arbiter and credits for each traffic class. */ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) { u32 reg = 0; u32 credit_refill = 0; u32 credit_max = 0; u8 i = 0; /* * Disable the arbiter before changing parameters * (always enable recycle mode; WSP) */ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); /* Map all traffic classes to their UP */ reg = 0; for (i = 0; i < MAX_USER_PRIORITY; i++) reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); /* Configure traffic class credits and priority */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { credit_refill = refill[i]; credit_max = max[i]; reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT; if (prio_type[i] == prio_link) reg |= IXGBE_RTRPT4C_LSP; IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); } /* * Configure Rx packet plane (recycle mode; WSP) and * enable arbiter */ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); return 0; } /** * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter * @hw: pointer to hardware structure * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @prio_type: priority type indexed by traffic class * * Configure Tx Descriptor Arbiter and credits for each traffic class. */ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type) { u32 reg, max_credits; u8 i; /* Clear the per-Tx queue credits; we use per-TC instead */ for (i = 0; i < 128; i++) { IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0); } /* Configure traffic class credits and priority */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { max_credits = max[i]; reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; reg |= refill[i]; reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT; if (prio_type[i] == prio_group) reg |= IXGBE_RTTDT2C_GSP; if (prio_type[i] == prio_link) reg |= IXGBE_RTTDT2C_LSP; IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); } /* * Configure Tx descriptor plane (recycle mode; WSP) and * enable arbiter */ reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM; IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); return 0; } /** * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter * @hw: pointer to hardware structure * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @prio_type: priority type indexed by traffic class * @prio_tc: priority to tc assignments indexed by priority * * Configure Tx Packet Arbiter and credits for each traffic class. 
*/ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) { u32 reg; u8 i; /* * Disable the arbiter before changing parameters * (always enable recycle mode; SP; arb delay) */ reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) | IXGBE_RTTPCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); /* Map all traffic classes to their UP */ reg = 0; for (i = 0; i < MAX_USER_PRIORITY; i++) reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); /* Configure traffic class credits and priority */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { reg = refill[i]; reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT; reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT; if (prio_type[i] == prio_group) reg |= IXGBE_RTTPT2C_GSP; if (prio_type[i] == prio_link) reg |= IXGBE_RTTPT2C_LSP; IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); } /* * Configure Tx packet plane (recycle mode; SP; arb delay) and * enable arbiter */ reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT); IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); return 0; } /** * ixgbe_dcb_config_pfc_82599 - Configure priority flow control * @hw: pointer to hardware structure * @pfc_en: enabled pfc bitmask * @prio_tc: priority to tc assignments indexed by priority * * Configure Priority Flow Control (PFC) for each traffic class. */ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) { u32 i, j, fcrtl, reg; u8 max_tc = 0; /* Enable Transmit Priority Flow Control */ IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY); /* Enable Receive Priority Flow Control */ reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); reg |= IXGBE_MFLCN_DPF; /* * X540 & X550 supports per TC Rx priority flow control. * So clear all TCs and only enable those that should be * enabled. */ reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); if (hw->mac.type >= ixgbe_mac_X540) reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; if (pfc_en) reg |= IXGBE_MFLCN_RPFCE; IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); for (i = 0; i < MAX_USER_PRIORITY; i++) { if (prio_tc[i] > max_tc) max_tc = prio_tc[i]; } /* Configure PFC Tx thresholds per TC */ for (i = 0; i <= max_tc; i++) { int enabled = 0; for (j = 0; j < MAX_USER_PRIORITY; j++) { if ((prio_tc[j] == i) && (pfc_en & BIT(j))) { enabled = 1; break; } } if (enabled) { reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); } else { /* In order to prevent Tx hangs when the internal Tx * switch is enabled we must set the high water mark * to the Rx packet buffer size - 24KB. This allows * the Tx switch to function even under heavy Rx * workloads. 
*/ reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); } IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); } for (; i < MAX_TRAFFIC_CLASS; i++) { IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0); } /* Configure pause time (2 TCs per register) */ reg = hw->fc.pause_time * 0x00010001; for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); /* Configure flow control refresh threshold value */ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); return 0; } /** * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics * @hw: pointer to hardware structure * * Configure queue statistics registers, all queues belonging to same traffic * class uses a single set of queue statistics counters. */ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) { u32 reg = 0; u8 i = 0; /* * Receive Queues stats setting * 32 RQSMR registers, each configuring 4 queues. * Set all 16 queues of each TC to the same stat * with TC 'n' going to stat 'n'. */ for (i = 0; i < 32; i++) { reg = 0x01010101 * (i / 4); IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); } /* * Transmit Queues stats setting * 32 TQSM registers, each controlling 4 queues. * Set all queues of each TC to the same stat * with TC 'n' going to stat 'n'. * Tx queues are allocated non-uniformly to TCs: * 32, 32, 16, 16, 8, 8, 8, 8. */ for (i = 0; i < 32; i++) { if (i < 8) reg = 0x00000000; else if (i < 16) reg = 0x01010101; else if (i < 20) reg = 0x02020202; else if (i < 24) reg = 0x03030303; else if (i < 26) reg = 0x04040404; else if (i < 28) reg = 0x05050505; else if (i < 30) reg = 0x06060606; else reg = 0x07070707; IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); } return 0; } /** * ixgbe_dcb_hw_config_82599 - Configure and enable DCB * @hw: pointer to hardware structure * @pfc_en: enabled pfc bitmask * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @prio_type: priority type indexed by traffic class * @prio_tc: priority to tc assignments indexed by priority * * Configure dcb settings and enable dcb mode. */ s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) { ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type, prio_tc); ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, prio_type); ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, prio_type, prio_tc); ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); ixgbe_dcb_config_tc_stats_82599(hw); return 0; }
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
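As a hedged illustration of how the arbiter and PFC helpers in the file above fit together, the sketch below programs a simple two-traffic-class split through ixgbe_dcb_hw_config_82599(). The credit numbers are arbitrary placeholders; in the driver they are normally derived by ixgbe_dcb_calculate_tc_credits() and the ixgbe_dcb_unpack_*() helpers rather than hard coded.

/* Hypothetical example, not part of the driver: two TCs, user priorities
 * 0-3 mapped to TC0 and 4-7 to TC1, with PFC enabled only for the
 * priorities mapped to TC1. Credit values are illustrative only.
 */
static s32 example_dcb_two_tc(struct ixgbe_hw *hw)
{
	u16 refill[MAX_TRAFFIC_CLASS] = { 512, 512 };
	u16 max[MAX_TRAFFIC_CLASS] = { 2048, 2048 };
	u8 bwg_id[MAX_TRAFFIC_CLASS] = { 0, 1 };
	u8 prio_type[MAX_TRAFFIC_CLASS] = { prio_group, prio_group };
	u8 prio_tc[MAX_USER_PRIORITY] = { 0, 0, 0, 0, 1, 1, 1, 1 };
	u8 pfc_en = 0xF0;	/* per-priority bitmap: priorities 4-7 */

	return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
					 bwg_id, prio_type, prio_tc);
}

Note that pfc_en is indexed by user priority, not by traffic class: ixgbe_dcb_config_pfc_82599() enables the threshold registers for a TC only if some priority mapped to it has its bit set, which is why the upper four bits select TC1 under this mapping.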
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "ixgbe.h" #include <linux/dcbnl.h> #include "ixgbe_dcb_82598.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_sriov.h" /* Callbacks for DCB netlink in the kernel */ #define BIT_PFC 0x02 #define BIT_PG_RX 0x04 #define BIT_PG_TX 0x08 #define BIT_APP_UPCHG 0x10 /* Responses for the DCB_C_SET_ALL command */ #define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ #define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ #define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) { struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg; struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg; struct tc_configuration *src = NULL; struct tc_configuration *dst = NULL; int i, j; int tx = DCB_TX_CONFIG; int rx = DCB_RX_CONFIG; int changes = 0; #ifdef IXGBE_FCOE struct dcb_app app = { .selector = DCB_APP_IDTYPE_ETHTYPE, .protocol = ETH_P_FCOE, }; u8 up = dcb_getapp(adapter->netdev, &app); if (up && !(up & BIT(adapter->fcoe.up))) changes |= BIT_APP_UPCHG; #endif for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0]; dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0]; if (dst->path[tx].prio_type != src->path[tx].prio_type) { dst->path[tx].prio_type = src->path[tx].prio_type; changes |= BIT_PG_TX; } if (dst->path[tx].bwg_id != src->path[tx].bwg_id) { dst->path[tx].bwg_id = src->path[tx].bwg_id; changes |= BIT_PG_TX; } if (dst->path[tx].bwg_percent != src->path[tx].bwg_percent) { dst->path[tx].bwg_percent = src->path[tx].bwg_percent; changes |= BIT_PG_TX; } if (dst->path[tx].up_to_tc_bitmap != src->path[tx].up_to_tc_bitmap) { dst->path[tx].up_to_tc_bitmap = src->path[tx].up_to_tc_bitmap; changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG); } if (dst->path[rx].prio_type != src->path[rx].prio_type) { dst->path[rx].prio_type = src->path[rx].prio_type; changes |= BIT_PG_RX; } if (dst->path[rx].bwg_id != src->path[rx].bwg_id) { dst->path[rx].bwg_id = src->path[rx].bwg_id; changes |= BIT_PG_RX; } if (dst->path[rx].bwg_percent != src->path[rx].bwg_percent) { dst->path[rx].bwg_percent = src->path[rx].bwg_percent; changes |= BIT_PG_RX; } if (dst->path[rx].up_to_tc_bitmap != src->path[rx].up_to_tc_bitmap) { dst->path[rx].up_to_tc_bitmap = src->path[rx].up_to_tc_bitmap; changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG); } } for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { j = i - DCB_PG_ATTR_BW_ID_0; if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) { dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j]; changes |= BIT_PG_TX; } if (dcfg->bw_percentage[rx][j] != scfg->bw_percentage[rx][j]) { dcfg->bw_percentage[rx][j] = scfg->bw_percentage[rx][j]; changes |= BIT_PG_RX; } } for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { j = i - DCB_PFC_UP_ATTR_0; if (dcfg->tc_config[j].dcb_pfc != scfg->tc_config[j].dcb_pfc) { dcfg->tc_config[j].dcb_pfc = scfg->tc_config[j].dcb_pfc; changes |= BIT_PFC; } } if (dcfg->pfc_mode_enable != scfg->pfc_mode_enable) { dcfg->pfc_mode_enable = scfg->pfc_mode_enable; changes |= BIT_PFC; } return changes; } static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); } static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) { struct ixgbe_adapter *adapter = netdev_priv(netdev); /* Fail command if not in CEE mode */ if 
(!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return 1; /* verify there is something to do, if not then exit */ if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) return 0; return !!ixgbe_setup_tc(netdev, state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0); } static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 *perm_addr) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int i, j; memset(perm_addr, 0xff, MAX_ADDR_LEN); for (i = 0; i < netdev->addr_len; i++) perm_addr[i] = adapter->hw.mac.perm_addr[i]; switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: for (j = 0; j < netdev->addr_len; j++, i++) perm_addr[i] = adapter->hw.mac.san_addr[j]; break; default: break; } } static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 prio, u8 bwg_id, u8 bw_pct, u8 up_map) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (prio != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent = bw_pct; if (up_map != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = up_map; } static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, u8 bw_pct) { struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; } static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, u8 prio, u8 bwg_id, u8 bw_pct, u8 up_map) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (prio != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent = bw_pct; if (up_map != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = up_map; } static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, u8 bw_pct) { struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; } static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio, u8 *bwg_id, u8 *bw_pct, u8 *up_map) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type; *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; } static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, u8 *bw_pct) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id]; } static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, u8 *prio, u8 *bwg_id, u8 *bw_pct, u8 *up_map) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type; *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; } static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, u8 *bw_pct) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *bw_pct = 
adapter->dcb_cfg.bw_percentage[1][bwg_id]; } static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, u8 setting) { struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != adapter->dcb_cfg.tc_config[priority].dcb_pfc) adapter->temp_dcb_cfg.pfc_mode_enable = true; } static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, u8 *setting) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; } static void ixgbe_dcbnl_devreset(struct net_device *dev) { struct ixgbe_adapter *adapter = netdev_priv(dev); while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) usleep_range(1000, 2000); if (netif_running(dev)) dev->netdev_ops->ndo_stop(dev); ixgbe_clear_interrupt_scheme(adapter); ixgbe_init_interrupt_scheme(adapter); if (netif_running(dev)) dev->netdev_ops->ndo_open(dev); clear_bit(__IXGBE_RESETTING, &adapter->state); } static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; struct ixgbe_hw *hw = &adapter->hw; int ret = DCB_NO_HW_CHG; int i; /* Fail command if not in CEE mode */ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return DCB_NO_HW_CHG; adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter, MAX_TRAFFIC_CLASS); if (!adapter->dcb_set_bitmap) return DCB_NO_HW_CHG; if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; /* Priority to TC mapping in CEE case default to 1:1 */ u8 prio_tc[MAX_USER_PRIORITY]; int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; #ifdef IXGBE_FCOE if (adapter->netdev->features & NETIF_F_FCOE_MTU) max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); #endif ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, DCB_TX_CONFIG); ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, DCB_RX_CONFIG); ixgbe_dcb_unpack_refill(dcb_cfg, DCB_TX_CONFIG, refill); ixgbe_dcb_unpack_max(dcb_cfg, max); ixgbe_dcb_unpack_bwgid(dcb_cfg, DCB_TX_CONFIG, bwg_id); ixgbe_dcb_unpack_prio(dcb_cfg, DCB_TX_CONFIG, prio_type); ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc); ixgbe_dcb_hw_ets_config(hw, refill, max, bwg_id, prio_type, prio_tc); for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) netdev_set_prio_tc_map(netdev, i, prio_tc[i]); ret = DCB_HW_CHG_RST; } if (adapter->dcb_set_bitmap & BIT_PFC) { if (dcb_cfg->pfc_mode_enable) { u8 pfc_en; u8 prio_tc[MAX_USER_PRIORITY]; ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc); ixgbe_dcb_unpack_pfc(dcb_cfg, &pfc_en); ixgbe_dcb_hw_pfc_config(hw, pfc_en, prio_tc); } else { hw->mac.ops.fc_enable(hw); } ixgbe_set_rx_drop_en(adapter); ret = DCB_HW_CHG; } #ifdef IXGBE_FCOE /* Reprogram FCoE hardware offloads when the traffic class * FCoE is using changes. This happens if the APP info * changes or the up2tc mapping is updated. 
*/ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { struct dcb_app app = { .selector = DCB_APP_IDTYPE_ETHTYPE, .protocol = ETH_P_FCOE, }; u8 up = dcb_getapp(netdev, &app); adapter->fcoe.up = ffs(up) - 1; ixgbe_dcbnl_devreset(netdev); ret = DCB_HW_CHG_RST; } #endif adapter->dcb_set_bitmap = 0x00; return ret; } static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) { struct ixgbe_adapter *adapter = netdev_priv(netdev); switch (capid) { case DCB_CAP_ATTR_PG: *cap = true; break; case DCB_CAP_ATTR_PFC: *cap = true; break; case DCB_CAP_ATTR_UP2TC: *cap = false; break; case DCB_CAP_ATTR_PG_TCS: *cap = 0x80; break; case DCB_CAP_ATTR_PFC_TCS: *cap = 0x80; break; case DCB_CAP_ATTR_GSP: *cap = true; break; case DCB_CAP_ATTR_BCN: *cap = false; break; case DCB_CAP_ATTR_DCBX: *cap = adapter->dcbx_cap; break; default: *cap = false; break; } return 0; } static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { switch (tcid) { case DCB_NUMTCS_ATTR_PG: *num = adapter->dcb_cfg.num_tcs.pg_tcs; break; case DCB_NUMTCS_ATTR_PFC: *num = adapter->dcb_cfg.num_tcs.pfc_tcs; break; default: return -EINVAL; } } else { return -EINVAL; } return 0; } static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) { return -EINVAL; } static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); return adapter->dcb_cfg.pfc_mode_enable; } static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) { struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.pfc_mode_enable = state; } /** * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority * @netdev : the corresponding netdev * @idtype : identifies the id as ether type or TCP/UDP port number * @id: id is either ether type or TCP/UDP port number * * Returns : on success, returns a non-zero 802.1p user priority bitmap * otherwise returns -EINVAL as the invalid user priority bitmap to indicate an * error. 
*/ static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct dcb_app app = { .selector = idtype, .protocol = id, }; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return -EINVAL; return dcb_getapp(netdev, &app); } static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets; ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs; /* No IEEE PFC settings available */ if (!my_ets) return 0; ets->cbs = my_ets->cbs; memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); return 0; } static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) { struct ixgbe_adapter *adapter = netdev_priv(dev); int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; int i, err; __u8 max_tc = 0; __u8 map_chg = 0; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; if (!adapter->ixgbe_ieee_ets) { adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets), GFP_KERNEL); if (!adapter->ixgbe_ieee_ets) return -ENOMEM; /* initialize UP2TC mappings to invalid value */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) adapter->ixgbe_ieee_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS; /* if possible update UP2TC mappings from HW */ ixgbe_dcb_read_rtrup2tc(&adapter->hw, adapter->ixgbe_ieee_ets->prio_tc); } for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { if (ets->prio_tc[i] > max_tc) max_tc = ets->prio_tc[i]; if (ets->prio_tc[i] != adapter->ixgbe_ieee_ets->prio_tc[i]) map_chg = 1; } memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets)); if (max_tc) max_tc++; if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs) return -EINVAL; if (max_tc != adapter->hw_tcs) { err = ixgbe_setup_tc(dev, max_tc); if (err) return err; } else if (map_chg) { ixgbe_dcbnl_devreset(dev); } return ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); } static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc; int i; pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs; /* No IEEE PFC settings available */ if (!my_pfc) return 0; pfc->pfc_en = my_pfc->pfc_en; pfc->mbc = my_pfc->mbc; pfc->delay = my_pfc->delay; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { pfc->requests[i] = adapter->stats.pxoffrxc[i]; pfc->indications[i] = adapter->stats.pxofftxc[i]; } return 0; } static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; u8 *prio_tc; int err; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; if (!adapter->ixgbe_ieee_pfc) { adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc), GFP_KERNEL); if (!adapter->ixgbe_ieee_pfc) return -ENOMEM; } prio_tc = adapter->ixgbe_ieee_ets->prio_tc; memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc)); /* Enable link flow control parameters if PFC is disabled */ if (pfc->pfc_en) err = ixgbe_dcb_hw_pfc_config(hw, pfc->pfc_en, prio_tc); else err = hw->mac.ops.fc_enable(hw); ixgbe_set_rx_drop_en(adapter); return err; } static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) { struct ixgbe_adapter *adapter = netdev_priv(dev); int err; if 
(!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; err = dcb_ieee_setapp(dev, app); if (err) return err; #ifdef IXGBE_FCOE if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && app->protocol == ETH_P_FCOE) { u8 app_mask = dcb_ieee_getapp_mask(dev, app); if (app_mask & BIT(adapter->fcoe.up)) return 0; adapter->fcoe.up = app->priority; ixgbe_dcbnl_devreset(dev); } #endif /* VF devices should use default UP when available */ if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && app->protocol == 0) { int vf; adapter->default_up = app->priority; for (vf = 0; vf < adapter->num_vfs; vf++) { struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; if (!vfinfo->pf_qos) ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, app->priority, vf); } } return 0; } static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) { struct ixgbe_adapter *adapter = netdev_priv(dev); int err; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; err = dcb_ieee_delapp(dev, app); #ifdef IXGBE_FCOE if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && app->protocol == ETH_P_FCOE) { u8 app_mask = dcb_ieee_getapp_mask(dev, app); if (app_mask & BIT(adapter->fcoe.up)) return 0; adapter->fcoe.up = app_mask ? ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC; ixgbe_dcbnl_devreset(dev); } #endif /* IF default priority is being removed clear VF default UP */ if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && app->protocol == 0 && adapter->default_up == app->priority) { int vf; long unsigned int app_mask = dcb_ieee_getapp_mask(dev, app); int qos = app_mask ? find_first_bit(&app_mask, 8) : 0; adapter->default_up = qos; for (vf = 0; vf < adapter->num_vfs; vf++) { struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; if (!vfinfo->pf_qos) ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, qos, vf); } } return err; } static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) { struct ixgbe_adapter *adapter = netdev_priv(dev); return adapter->dcbx_cap; } static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ieee_ets ets = {0}; struct ieee_pfc pfc = {0}; int err = 0; /* no support for LLD_MANAGED modes or CEE+IEEE */ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || !(mode & DCB_CAP_DCBX_HOST)) return 1; if (mode == adapter->dcbx_cap) return 0; adapter->dcbx_cap = mode; /* ETS and PFC defaults */ ets.ets_cap = 8; pfc.pfc_cap = 8; if (mode & DCB_CAP_DCBX_VER_IEEE) { ixgbe_dcbnl_ieee_setets(dev, &ets); ixgbe_dcbnl_ieee_setpfc(dev, &pfc); } else if (mode & DCB_CAP_DCBX_VER_CEE) { u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG; adapter->dcb_set_bitmap |= mask; ixgbe_dcbnl_set_all(dev); } else { /* Drop into single TC mode strict priority as this * indicates CEE and IEEE versions are disabled */ ixgbe_dcbnl_ieee_setets(dev, &ets); ixgbe_dcbnl_ieee_setpfc(dev, &pfc); err = ixgbe_setup_tc(dev, 0); } return err ? 
1 : 0; } const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops = { .ieee_getets = ixgbe_dcbnl_ieee_getets, .ieee_setets = ixgbe_dcbnl_ieee_setets, .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc, .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc, .ieee_setapp = ixgbe_dcbnl_ieee_setapp, .ieee_delapp = ixgbe_dcbnl_ieee_delapp, .getstate = ixgbe_dcbnl_get_state, .setstate = ixgbe_dcbnl_set_state, .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx, .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx, .setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx, .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx, .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx, .getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx, .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx, .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx, .setpfccfg = ixgbe_dcbnl_set_pfc_cfg, .getpfccfg = ixgbe_dcbnl_get_pfc_cfg, .setall = ixgbe_dcbnl_set_all, .getcap = ixgbe_dcbnl_getcap, .getnumtcs = ixgbe_dcbnl_getnumtcs, .setnumtcs = ixgbe_dcbnl_setnumtcs, .getpfcstate = ixgbe_dcbnl_getpfcstate, .setpfcstate = ixgbe_dcbnl_setpfcstate, .getapp = ixgbe_dcbnl_getapp, .getdcbx = ixgbe_dcbnl_getdcbx, .setdcbx = ixgbe_dcbnl_setdcbx, };
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
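The CEE path in the file above is driven from the dcbnl netlink core rather than called directly. The hedged sketch below only illustrates the staging model: the set_* callbacks write into adapter->temp_dcb_cfg, and setall (ixgbe_dcbnl_set_all) copies the staged values into dcb_cfg and pushes them to hardware. The direct calls, the 50% value and the PFC setting (assumed to follow enum dcb_pfc_type, with 1 meaning fully enabled) are illustrative assumptions.

/* Hypothetical illustration, not part of the driver: what a CEE agent such
 * as lldpad effectively does via netlink, written as direct calls to the
 * ops table defined above.
 */
static void example_cee_commit(struct net_device *netdev)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;

	/* stage Tx PG config for TC0: BWG 0, 50% of the group's bandwidth;
	 * DCB_ATTR_VALUE_UNDEFINED leaves the other fields untouched
	 */
	ops->setpgtccfgtx(netdev, 0, DCB_ATTR_VALUE_UNDEFINED, 0, 50,
			  DCB_ATTR_VALUE_UNDEFINED);

	/* stage PFC on user priority 0 (assumed pfc_enabled_full == 1) */
	ops->setpfccfg(netdev, 0, 1);

	/* commit: returns DCB_HW_CHG_RST, DCB_HW_CHG or DCB_NO_HW_CHG */
	ops->setall(netdev);
}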
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> #include "ixgbe.h" #include "ixgbe_mbx.h" /** * ixgbe_read_mbx - Reads a message from the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to read * * returns SUCCESS if it successfully read message from buffer **/ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; /* limit read to size of mailbox */ if (size > mbx->size) size = mbx->size; if (!mbx->ops) return IXGBE_ERR_MBX; return mbx->ops->read(hw, msg, size, mbx_id); } /** * ixgbe_write_mbx - Write a message to the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully copied message into the buffer **/ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; if (size > mbx->size) return IXGBE_ERR_MBX; if (!mbx->ops) return IXGBE_ERR_MBX; return mbx->ops->write(hw, msg, size, mbx_id); } /** * ixgbe_check_for_msg - checks to see if someone sent us mail * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the Status bit was found or else ERR_MBX **/ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; if (!mbx->ops) return IXGBE_ERR_MBX; return mbx->ops->check_for_msg(hw, mbx_id); } /** * ixgbe_check_for_ack - checks to see if someone sent us ACK * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the Status bit was found or else ERR_MBX **/ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; if (!mbx->ops) return IXGBE_ERR_MBX; return mbx->ops->check_for_ack(hw, mbx_id); } /** * ixgbe_check_for_rst - checks to see if other side has reset * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the Status bit was found or else ERR_MBX **/ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; if (!mbx->ops) return IXGBE_ERR_MBX; return mbx->ops->check_for_rst(hw, mbx_id); } /** * ixgbe_poll_for_msg - Wait for message notification * @hw: pointer to the HW structure * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully received a message notification **/ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; if (!countdown || !mbx->ops) return IXGBE_ERR_MBX; while (mbx->ops->check_for_msg(hw, mbx_id)) { countdown--; if (!countdown) return IXGBE_ERR_MBX; udelay(mbx->usec_delay); } return 0; } /** * ixgbe_poll_for_ack - Wait for message acknowledgement * @hw: pointer to the HW structure * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully received a message acknowledgement **/ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; if (!countdown || !mbx->ops) return IXGBE_ERR_MBX; while (mbx->ops->check_for_ack(hw, mbx_id)) { countdown--; if (!countdown) return IXGBE_ERR_MBX; udelay(mbx->usec_delay); } return 0; } /** * ixgbe_read_posted_mbx - Wait for message notification and receive message * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of 
buffer * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully received a message notification and * copied it into the receive buffer. **/ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; s32 ret_val; if (!mbx->ops) return IXGBE_ERR_MBX; ret_val = ixgbe_poll_for_msg(hw, mbx_id); if (ret_val) return ret_val; /* if ack received read message */ return mbx->ops->read(hw, msg, size, mbx_id); } /** * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully copied message into the buffer and * received an ack to that message within delay * timeout period **/ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; s32 ret_val; /* exit if either we can't write or there isn't a defined timeout */ if (!mbx->ops || !mbx->timeout) return IXGBE_ERR_MBX; /* send msg */ ret_val = mbx->ops->write(hw, msg, size, mbx_id); if (ret_val) return ret_val; /* if msg sent wait until we receive an ack */ return ixgbe_poll_for_ack(hw, mbx_id); } static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) { u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); if (mbvficr & mask) { IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); return 0; } return IXGBE_ERR_MBX; } /** * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail * @hw: pointer to the HW structure * @vf_number: the VF index * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) { s32 index = IXGBE_MBVFICR_INDEX(vf_number); u32 vf_bit = vf_number % 16; if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, index)) { hw->mbx.stats.reqs++; return 0; } return IXGBE_ERR_MBX; } /** * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed * @hw: pointer to the HW structure * @vf_number: the VF index * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) { s32 index = IXGBE_MBVFICR_INDEX(vf_number); u32 vf_bit = vf_number % 16; if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, index)) { hw->mbx.stats.acks++; return 0; } return IXGBE_ERR_MBX; } /** * ixgbe_check_for_rst_pf - checks to see if the VF has reset * @hw: pointer to the HW structure * @vf_number: the VF index * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) { u32 reg_offset = (vf_number < 32) ? 
0 : 1; u32 vf_shift = vf_number % 32; u32 vflre = 0; switch (hw->mac.type) { case ixgbe_mac_82599EB: vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); break; case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); break; default: break; } if (vflre & BIT(vf_shift)) { IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift)); hw->mbx.stats.rsts++; return 0; } return IXGBE_ERR_MBX; } /** * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock * @hw: pointer to the HW structure * @vf_number: the VF index * * return SUCCESS if we obtained the mailbox lock **/ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) { u32 p2v_mailbox; /* Take ownership of the buffer */ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU); /* reserve mailbox for vf use */ p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) return 0; return IXGBE_ERR_MBX; } /** * ixgbe_write_mbx_pf - Places a message in the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @vf_number: the VF index * * returns SUCCESS if it successfully copied message into the buffer **/ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 vf_number) { s32 ret_val; u16 i; /* lock the mailbox to prevent pf/vf race condition */ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); if (ret_val) return ret_val; /* flush msg and acks as we are overwriting the message buffer */ ixgbe_check_for_msg_pf(hw, vf_number); ixgbe_check_for_ack_pf(hw, vf_number); /* copy the caller specified message to the mailbox memory buffer */ for (i = 0; i < size; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]); /* Interrupt VF to tell it a message has been sent and release buffer*/ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS); /* update stats */ hw->mbx.stats.msgs_tx++; return 0; } /** * ixgbe_read_mbx_pf - Read a message from the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @vf_number: the VF index * * This function copies a message from the mailbox buffer to the caller's * memory buffer. The presumption is that the caller knows that there was * a message due to a VF request so no polling for message is needed. 
**/ static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 vf_number) { s32 ret_val; u16 i; /* lock the mailbox to prevent pf/vf race condition */ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); if (ret_val) return ret_val; /* copy the message to the mailbox memory buffer */ for (i = 0; i < size; i++) msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i); /* Acknowledge the message and release buffer */ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK); /* update stats */ hw->mbx.stats.msgs_rx++; return 0; } #ifdef CONFIG_PCI_IOV /** * ixgbe_init_mbx_params_pf - set initial values for pf mailbox * @hw: pointer to the HW structure * * Initializes the hw->mbx struct to correct values for pf mailbox */ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) { struct ixgbe_mbx_info *mbx = &hw->mbx; if (hw->mac.type != ixgbe_mac_82599EB && hw->mac.type != ixgbe_mac_X550 && hw->mac.type != ixgbe_mac_X550EM_x && hw->mac.type != ixgbe_mac_x550em_a && hw->mac.type != ixgbe_mac_X540) return; mbx->timeout = 0; mbx->usec_delay = 0; mbx->stats.msgs_tx = 0; mbx->stats.msgs_rx = 0; mbx->stats.reqs = 0; mbx->stats.acks = 0; mbx->stats.rsts = 0; mbx->size = IXGBE_VFMAILBOX_SIZE; } #endif /* CONFIG_PCI_IOV */ const struct ixgbe_mbx_operations mbx_ops_generic = { .read = ixgbe_read_mbx_pf, .write = ixgbe_write_mbx_pf, .read_posted = ixgbe_read_posted_mbx, .write_posted = ixgbe_write_posted_mbx, .check_for_msg = ixgbe_check_for_msg_pf, .check_for_ack = ixgbe_check_for_ack_pf, .check_for_rst = ixgbe_check_for_rst_pf, };
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
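A hedged note on how the PF side uses the mailbox routines above: because ixgbe_init_mbx_params_pf() leaves timeout at zero, the *_posted variants return IXGBE_ERR_MBX on the PF, so PF-to-VF messages normally go through the plain read/write ops. The sketch below is modelled on the SR-IOV code's VF ping; the helper name is hypothetical and IXGBE_PF_CONTROL_MSG is assumed to be defined in ixgbe_mbx.h.

/* Hypothetical example, not part of the driver: notify VF 'vf' that the PF
 * has something to tell it, without waiting for an ACK.
 */
static s32 example_pf_ping_vf(struct ixgbe_hw *hw, u16 vf)
{
	u32 ping = IXGBE_PF_CONTROL_MSG;	/* one-word control message */

	/* ixgbe_write_mbx() -> ixgbe_write_mbx_pf(): lock the per-VF
	 * mailbox, copy the word into PFMBMEM and raise PFMAILBOX.STS to
	 * interrupt the VF.
	 */
	return ixgbe_write_mbx(hw, &ping, 1, vf);
}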
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "ixgbe.h" #include <linux/ptp_classify.h> #include <linux/clocksource.h> /* * The 82599 and the X540 do not have true 64bit nanosecond scale * counter registers. Instead, SYSTIME is defined by a fixed point * system which allows the user to define the scale counter increment * value at every level change of the oscillator driving the SYSTIME * value. For both devices the TIMINCA:IV field defines this * increment. On the X540 device, 31 bits are provided. However on the * 82599 only provides 24 bits. The time unit is determined by the * clock frequency of the oscillator in combination with the TIMINCA * register. When these devices link at 10Gb the oscillator has a * period of 6.4ns. In order to convert the scale counter into * nanoseconds the cyclecounter and timecounter structures are * used. The SYSTIME registers need to be converted to ns values by use * of only a right shift (division by power of 2). The following math * determines the largest incvalue that will fit into the available * bits in the TIMINCA register. * * PeriodWidth: Number of bits to store the clock period * MaxWidth: The maximum width value of the TIMINCA register * Period: The clock period for the oscillator * round(): discard the fractional portion of the calculation * * Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ] * * For the X540, MaxWidth is 31 bits, and the base period is 6.4 ns * For the 82599, MaxWidth is 24 bits, and the base period is 6.4 ns * * The period also changes based on the link speed: * At 10Gb link or no link, the period remains the same. * At 1Gb link, the period is multiplied by 10. (64ns) * At 100Mb link, the period is multiplied by 100. (640ns) * * The calculated value allows us to right shift the SYSTIME register * value in order to quickly convert it into a nanosecond clock, * while allowing for the maximum possible adjustment value. * * These diagrams are only for the 10Gb link period * * SYSTIMEH SYSTIMEL * +--------------+ +--------------+ * X540 | 32 | | 1 | 3 | 28 | * *--------------+ +--------------+ * \________ 36 bits ______/ fract * * +--------------+ +--------------+ * 82599 | 32 | | 8 | 3 | 21 | * *--------------+ +--------------+ * \________ 43 bits ______/ fract * * The 36 bit X540 SYSTIME overflows every * 2^36 * 10^-9 / 60 = 1.14 minutes or 69 seconds * * The 43 bit 82599 SYSTIME overflows every * 2^43 * 10^-9 / 3600 = 2.4 hours */ #define IXGBE_INCVAL_10GB 0x66666666 #define IXGBE_INCVAL_1GB 0x40000000 #define IXGBE_INCVAL_100 0x50000000 #define IXGBE_INCVAL_SHIFT_10GB 28 #define IXGBE_INCVAL_SHIFT_1GB 24 #define IXGBE_INCVAL_SHIFT_100 21 #define IXGBE_INCVAL_SHIFT_82599 7 #define IXGBE_INCPER_SHIFT_82599 24 #define IXGBE_OVERFLOW_PERIOD (HZ * 30) #define IXGBE_PTP_TX_TIMEOUT (HZ) /* We use our own definitions instead of NSEC_PER_SEC because we want to mark * the value as a ULL to force precision when bit shifting. */ #define NS_PER_SEC 1000000000ULL #define NS_PER_HALF_SEC 500000000ULL /* In contrast, the X550 controller has two registers, SYSTIMEH and SYSTIMEL * which contain measurements of seconds and nanoseconds respectively. This * matches the standard linux representation of time in the kernel. In addition, * the X550 also has a SYSTIMER register which represents residue, or * subnanosecond overflow adjustments. To control clock adjustment, the TIMINCA * register is used, but it is unlike the X540 and 82599 devices. 
TIMINCA * represents units of 2^-32 nanoseconds, and uses 31 bits for this, with the * high bit representing whether the adjustent is positive or negative. Every * clock cycle, the X550 will add 12.5 ns + TIMINCA which can result in a range * of 12 to 13 nanoseconds adjustment. Unlike the 82599 and X540 devices, the * X550's clock for purposes of SYSTIME generation is constant and not dependent * on the link speed. * * SYSTIMEH SYSTIMEL SYSTIMER * +--------------+ +--------------+ +-------------+ * X550 | 32 | | 32 | | 32 | * *--------------+ +--------------+ +-------------+ * \____seconds___/ \_nanoseconds_/ \__2^-32 ns__/ * * This results in a full 96 bits to represent the clock, with 32 bits for * seconds, 32 bits for nanoseconds (largest value is 0d999999999 or just under * 1 second) and an additional 32 bits to measure sub nanosecond adjustments for * underflow of adjustments. * * The 32 bits of seconds for the X550 overflows every * 2^32 / ( 365.25 * 24 * 60 * 60 ) = ~136 years. * * In order to adjust the clock frequency for the X550, the TIMINCA register is * provided. This register represents a + or minus nearly 0.5 ns adjustment to * the base frequency. It is measured in 2^-32 ns units, with the high bit being * the sign bit. This register enables software to calculate frequency * adjustments and apply them directly to the clock rate. * * The math for converting scaled_ppm into TIMINCA values is fairly * straightforward. * * TIMINCA value = ( Base_Frequency * scaled_ppm ) / 1000000ULL << 16 * * To avoid overflow, we simply use mul_u64_u64_div_u64. * * This assumes that scaled_ppm is never high enough to create a value bigger * than TIMINCA's 31 bits can store. This is ensured by the stack, and is * measured in parts per billion. Calculating this value is also simple. * Max ppb = ( Max Adjustment / Base Frequency ) / 1000000000ULL * * For the X550, the Max adjustment is +/- 0.5 ns, and the base frequency is * 12.5 nanoseconds. This means that the Max ppb is 39999999 * Note: We subtract one in order to ensure no overflow, because the TIMINCA * register can only hold slightly under 0.5 nanoseconds. * * Because TIMINCA is measured in 2^-32 ns units, we have to convert 12.5 ns * into 2^-32 units, which is * * 12.5 * 2^32 = C80000000 * * Some revisions of hardware have a faster base frequency than the registers * were defined for. To fix this, we use a timecounter structure with the * proper mult and shift to convert the cycles into nanoseconds of time. */ #define IXGBE_X550_BASE_PERIOD 0xC80000000ULL #define INCVALUE_MASK 0x7FFFFFFF #define ISGN 0x80000000 /** * ixgbe_ptp_setup_sdp_X540 * @adapter: private adapter structure * * this function enables or disables the clock out feature on SDP0 for * the X540 device. It will create a 1 second periodic output that can * be used as the PPS (via an interrupt). * * It calculates when the system time will be on an exact second, and then * aligns the start of the PPS signal to that value. * * This works by using the cycle counter shift and mult values in reverse, and * assumes that the values we're shifting will not overflow. 
*/ static void ixgbe_ptp_setup_sdp_X540(struct ixgbe_adapter *adapter) { struct cyclecounter *cc = &adapter->hw_cc; struct ixgbe_hw *hw = &adapter->hw; u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem; u64 ns = 0, clock_edge = 0, clock_period; unsigned long flags; /* disable the pin first */ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); IXGBE_WRITE_FLUSH(hw); if (!(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED)) return; esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); /* enable the SDP0 pin as output, and connected to the * native function for Timesync (ClockOut) */ esdp |= IXGBE_ESDP_SDP0_DIR | IXGBE_ESDP_SDP0_NATIVE; /* enable the Clock Out feature on SDP0, and allow * interrupts to occur when the pin changes */ tsauxc = (IXGBE_TSAUXC_EN_CLK | IXGBE_TSAUXC_SYNCLK | IXGBE_TSAUXC_SDP0_INT); /* Determine the clock time period to use. This assumes that the * cycle counter shift is small enough to avoid overflow. */ clock_period = div_u64((NS_PER_HALF_SEC << cc->shift), cc->mult); clktiml = (u32)(clock_period); clktimh = (u32)(clock_period >> 32); /* Read the current clock time, and save the cycle counter value */ spin_lock_irqsave(&adapter->tmreg_lock, flags); ns = timecounter_read(&adapter->hw_tc); clock_edge = adapter->hw_tc.cycle_last; spin_unlock_irqrestore(&adapter->tmreg_lock, flags); /* Figure out how many seconds to add in order to round up */ div_u64_rem(ns, NS_PER_SEC, &rem); /* Figure out how many nanoseconds to add to round the clock edge up * to the next full second */ rem = (NS_PER_SEC - rem); /* Adjust the clock edge to align with the next full second. */ clock_edge += div_u64(((u64)rem << cc->shift), cc->mult); trgttiml = (u32)clock_edge; trgttimh = (u32)(clock_edge >> 32); IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_ptp_setup_sdp_X550 * @adapter: private adapter structure * * Enable or disable a clock output signal on SDP 0 for X550 hardware. * * Use the target time feature to align the output signal on the next full * second. * * This works by using the cycle counter shift and mult values in reverse, and * assumes that the values we're shifting will not overflow. */ static void ixgbe_ptp_setup_sdp_X550(struct ixgbe_adapter *adapter) { u32 esdp, tsauxc, freqout, trgttiml, trgttimh, rem, tssdp; struct cyclecounter *cc = &adapter->hw_cc; struct ixgbe_hw *hw = &adapter->hw; u64 ns = 0, clock_edge = 0; struct timespec64 ts; unsigned long flags; /* disable the pin first */ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); IXGBE_WRITE_FLUSH(hw); if (!(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED)) return; esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); /* enable the SDP0 pin as output, and connected to the * native function for Timesync (ClockOut) */ esdp |= IXGBE_ESDP_SDP0_DIR | IXGBE_ESDP_SDP0_NATIVE; /* enable the Clock Out feature on SDP0, and use Target Time 0 to * enable generation of interrupts on the clock change. */ #define IXGBE_TSAUXC_DIS_TS_CLEAR 0x40000000 tsauxc = (IXGBE_TSAUXC_EN_CLK | IXGBE_TSAUXC_ST0 | IXGBE_TSAUXC_EN_TT0 | IXGBE_TSAUXC_SDP0_INT | IXGBE_TSAUXC_DIS_TS_CLEAR); tssdp = (IXGBE_TSSDP_TS_SDP0_EN | IXGBE_TSSDP_TS_SDP0_CLK0); /* Determine the clock time period to use. This assumes that the * cycle counter shift is small enough to avoid overflowing a 32bit * value. 
*/ freqout = div_u64(NS_PER_HALF_SEC << cc->shift, cc->mult); /* Read the current clock time, and save the cycle counter value */ spin_lock_irqsave(&adapter->tmreg_lock, flags); ns = timecounter_read(&adapter->hw_tc); clock_edge = adapter->hw_tc.cycle_last; spin_unlock_irqrestore(&adapter->tmreg_lock, flags); /* Figure out how far past the next second we are */ div_u64_rem(ns, NS_PER_SEC, &rem); /* Figure out how many nanoseconds to add to round the clock edge up * to the next full second */ rem = (NS_PER_SEC - rem); /* Adjust the clock edge to align with the next full second. */ clock_edge += div_u64(((u64)rem << cc->shift), cc->mult); /* X550 hardware stores the time in 32bits of 'billions of cycles' and * 32bits of 'cycles'. There's no guarantee that cycles represents * nanoseconds. However, we can use the math from a timespec64 to * convert into the hardware representation. * * See ixgbe_ptp_read_X550() for more details. */ ts = ns_to_timespec64(clock_edge); trgttiml = (u32)ts.tv_nsec; trgttimh = (u32)ts.tv_sec; IXGBE_WRITE_REG(hw, IXGBE_FREQOUT0, freqout); IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_REG(hw, IXGBE_TSSDP, tssdp); IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_ptp_read_X550 - read cycle counter value * @cc: cyclecounter structure * * This function reads SYSTIME registers. It is called by the cyclecounter * structure to convert from internal representation into nanoseconds. We need * this for X550 since some skews do not have expected clock frequency and * result of SYSTIME is 32bits of "billions of cycles" and 32 bits of * "cycles", rather than seconds and nanoseconds. */ static u64 ixgbe_ptp_read_X550(const struct cyclecounter *cc) { struct ixgbe_adapter *adapter = container_of(cc, struct ixgbe_adapter, hw_cc); struct ixgbe_hw *hw = &adapter->hw; struct timespec64 ts; /* storage is 32 bits of 'billions of cycles' and 32 bits of 'cycles'. * Some revisions of hardware run at a higher frequency and so the * cycles are not guaranteed to be nanoseconds. The timespec64 created * here is used for its math/conversions but does not necessarily * represent nominal time. * * It should be noted that this cyclecounter will overflow at a * non-bitmask field since we have to convert our billions of cycles * into an actual cycles count. This results in some possible weird * situations at high cycle counter stamps. However given that 32 bits * of "seconds" is ~138 years this isn't a problem. Even at the * increased frequency of some revisions, this is still ~103 years. * Since the SYSTIME values start at 0 and we never write them, it is * highly unlikely for the cyclecounter to overflow in practice. 
*/ IXGBE_READ_REG(hw, IXGBE_SYSTIMR); ts.tv_nsec = IXGBE_READ_REG(hw, IXGBE_SYSTIML); ts.tv_sec = IXGBE_READ_REG(hw, IXGBE_SYSTIMH); return (u64)timespec64_to_ns(&ts); } /** * ixgbe_ptp_read_82599 - read raw cycle counter (to be used by time counter) * @cc: the cyclecounter structure * * this function reads the cyclecounter registers and is called by the * cyclecounter structure used to construct a ns counter from the * arbitrary fixed point registers */ static u64 ixgbe_ptp_read_82599(const struct cyclecounter *cc) { struct ixgbe_adapter *adapter = container_of(cc, struct ixgbe_adapter, hw_cc); struct ixgbe_hw *hw = &adapter->hw; u64 stamp = 0; stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; return stamp; } /** * ixgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp * @adapter: private adapter structure * @hwtstamp: stack timestamp structure * @timestamp: unsigned 64bit system time value * * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value * which can be used by the stack's ptp functions. * * The lock is used to protect consistency of the cyclecounter and the SYSTIME * registers. However, it does not need to protect against the Rx or Tx * timestamp registers, as there can't be a new timestamp until the old one is * unlatched by reading. * * In addition to the timestamp in hardware, some controllers need a software * overflow cyclecounter, and this function takes this into account as well. **/ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter, struct skb_shared_hwtstamps *hwtstamp, u64 timestamp) { unsigned long flags; struct timespec64 systime; u64 ns; memset(hwtstamp, 0, sizeof(*hwtstamp)); switch (adapter->hw.mac.type) { /* X550 and later hardware supposedly represent time using a seconds * and nanoseconds counter, instead of raw 64bits nanoseconds. We need * to convert the timestamp into cycles before it can be fed to the * cyclecounter. We need an actual cyclecounter because some revisions * of hardware run at a higher frequency and thus the counter does * not represent seconds/nanoseconds. Instead it can be thought of as * cycles and billions of cycles. */ case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: /* Upper 32 bits represent billions of cycles, lower 32 bits * represent cycles. However, we use timespec64_to_ns for the * correct math even though the units haven't been corrected * yet. */ systime.tv_sec = timestamp >> 32; systime.tv_nsec = timestamp & 0xFFFFFFFF; timestamp = timespec64_to_ns(&systime); break; default: break; } spin_lock_irqsave(&adapter->tmreg_lock, flags); ns = timecounter_cyc2time(&adapter->hw_tc, timestamp); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); hwtstamp->hwtstamp = ns_to_ktime(ns); } /** * ixgbe_ptp_adjfine_82599 * @ptp: the ptp clock structure * @scaled_ppm: scaled parts per million adjustment from base * * Adjust the frequency of the ptp cycle counter by the * indicated scaled_ppm from the base frequency. * * Scaled parts per million is ppm with a 16-bit binary fractional field. 
*/ static int ixgbe_ptp_adjfine_82599(struct ptp_clock_info *ptp, long scaled_ppm) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); struct ixgbe_hw *hw = &adapter->hw; u64 incval; smp_mb(); incval = READ_ONCE(adapter->base_incval); incval = adjust_by_scaled_ppm(incval, scaled_ppm); switch (hw->mac.type) { case ixgbe_mac_X540: if (incval > 0xFFFFFFFFULL) e_dev_warn("PTP scaled_ppm adjusted SYSTIME rate overflowed!\n"); IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, (u32)incval); break; case ixgbe_mac_82599EB: if (incval > 0x00FFFFFFULL) e_dev_warn("PTP scaled_ppm adjusted SYSTIME rate overflowed!\n"); IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, BIT(IXGBE_INCPER_SHIFT_82599) | ((u32)incval & 0x00FFFFFFUL)); break; default: break; } return 0; } /** * ixgbe_ptp_adjfine_X550 * @ptp: the ptp clock structure * @scaled_ppm: scaled parts per million adjustment from base * * Adjust the frequency of the SYSTIME registers by the indicated scaled_ppm * from base frequency. * * Scaled parts per million is ppm with a 16-bit binary fractional field. */ static int ixgbe_ptp_adjfine_X550(struct ptp_clock_info *ptp, long scaled_ppm) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); struct ixgbe_hw *hw = &adapter->hw; bool neg_adj; u64 rate; u32 inca; neg_adj = diff_by_scaled_ppm(IXGBE_X550_BASE_PERIOD, scaled_ppm, &rate); /* warn if rate is too large */ if (rate >= INCVALUE_MASK) e_dev_warn("PTP scaled_ppm adjusted SYSTIME rate overflowed!\n"); inca = rate & INCVALUE_MASK; if (neg_adj) inca |= ISGN; IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, inca); return 0; } /** * ixgbe_ptp_adjtime * @ptp: the ptp clock structure * @delta: offset to adjust the cycle counter by * * adjust the timer by resetting the timecounter structure. */ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); unsigned long flags; spin_lock_irqsave(&adapter->tmreg_lock, flags); timecounter_adjtime(&adapter->hw_tc, delta); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); if (adapter->ptp_setup_sdp) adapter->ptp_setup_sdp(adapter); return 0; } /** * ixgbe_ptp_gettimex * @ptp: the ptp clock structure * @ts: timespec to hold the PHC timestamp * @sts: structure to hold the system time before and after reading the PHC * * read the timecounter and return the correct value on ns, * after converting it into a struct timespec. */ static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, struct ptp_system_timestamp *sts) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); struct ixgbe_hw *hw = &adapter->hw; unsigned long flags; u64 ns, stamp; spin_lock_irqsave(&adapter->tmreg_lock, flags); switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: /* Upper 32 bits represent billions of cycles, lower 32 bits * represent cycles. However, we use timespec64_to_ns for the * correct math even though the units haven't been corrected * yet. 
*/ ptp_read_system_prets(sts); IXGBE_READ_REG(hw, IXGBE_SYSTIMR); ptp_read_system_postts(sts); ts->tv_nsec = IXGBE_READ_REG(hw, IXGBE_SYSTIML); ts->tv_sec = IXGBE_READ_REG(hw, IXGBE_SYSTIMH); stamp = timespec64_to_ns(ts); break; default: ptp_read_system_prets(sts); stamp = IXGBE_READ_REG(hw, IXGBE_SYSTIML); ptp_read_system_postts(sts); stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; break; } ns = timecounter_cyc2time(&adapter->hw_tc, stamp); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); *ts = ns_to_timespec64(ns); return 0; } /** * ixgbe_ptp_settime * @ptp: the ptp clock structure * @ts: the timespec containing the new time for the cycle counter * * reset the timecounter to use a new base value instead of the kernel * wall timer value. */ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); unsigned long flags; u64 ns = timespec64_to_ns(ts); /* reset the timecounter */ spin_lock_irqsave(&adapter->tmreg_lock, flags); timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); if (adapter->ptp_setup_sdp) adapter->ptp_setup_sdp(adapter); return 0; } /** * ixgbe_ptp_feature_enable * @ptp: the ptp clock structure * @rq: the requested feature to change * @on: whether to enable or disable the feature * * enable (or disable) ancillary features of the phc subsystem. * our driver only supports the PPS feature on the X540 */ static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); /** * When PPS is enabled, unmask the interrupt for the ClockOut * feature, so that the interrupt handler can send the PPS * event when the clock SDP triggers. Clear mask when PPS is * disabled */ if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp) return -ENOTSUPP; if (on) adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; else adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; adapter->ptp_setup_sdp(adapter); return 0; } /** * ixgbe_ptp_check_pps_event * @adapter: the private adapter structure * * This function is called by the interrupt routine when checking for * interrupts. It will check and handle a pps event. */ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ptp_clock_event event; event.type = PTP_CLOCK_PPS; /* this check is necessary in case the interrupt was enabled via some * alternative means (ex. debug_fs). Better to check here than * everywhere that calls this function. */ if (!adapter->ptp_clock) return; switch (hw->mac.type) { case ixgbe_mac_X540: ptp_clock_event(adapter->ptp_clock, &event); break; default: break; } } /** * ixgbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow * @adapter: private adapter struct * * this watchdog task periodically reads the timecounter * in order to prevent missing when the system time registers wrap * around. This needs to be run approximately twice a minute. 
*/ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) { bool timeout = time_is_before_jiffies(adapter->last_overflow_check + IXGBE_OVERFLOW_PERIOD); unsigned long flags; if (timeout) { /* Update the timecounter */ spin_lock_irqsave(&adapter->tmreg_lock, flags); timecounter_read(&adapter->hw_tc); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); adapter->last_overflow_check = jiffies; } } /** * ixgbe_ptp_rx_hang - detect error case when Rx timestamp registers latched * @adapter: private network adapter structure * * this watchdog task is scheduled to detect error case where hardware has * dropped an Rx packet that was timestamped when the ring is full. The * particular error is rare but leaves the device in a state unable to timestamp * any future packets. */ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); struct ixgbe_ring *rx_ring; unsigned long rx_event; int n; /* if we don't have a valid timestamp in the registers, just update the * timeout counter and exit */ if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) { adapter->last_rx_ptp_check = jiffies; return; } /* determine the most recent watchdog or rx_timestamp event */ rx_event = adapter->last_rx_ptp_check; for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; if (time_after(rx_ring->last_rx_timestamp, rx_event)) rx_event = rx_ring->last_rx_timestamp; } /* only need to read the high RXSTMP register to clear the lock */ if (time_is_before_jiffies(rx_event + 5 * HZ)) { IXGBE_READ_REG(hw, IXGBE_RXSTMPH); adapter->last_rx_ptp_check = jiffies; adapter->rx_hwtstamp_cleared++; e_warn(drv, "clearing RX Timestamp hang\n"); } } /** * ixgbe_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state * @adapter: the private adapter structure * * This function should be called whenever the state related to a Tx timestamp * needs to be cleared. This helps ensure that all related bits are reset for * the next Tx timestamp event. */ static void ixgbe_ptp_clear_tx_timestamp(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; IXGBE_READ_REG(hw, IXGBE_TXSTMPH); if (adapter->ptp_tx_skb) { dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; } clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); } /** * ixgbe_ptp_tx_hang - detect error case where Tx timestamp never finishes * @adapter: private network adapter structure */ void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter) { bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + IXGBE_PTP_TX_TIMEOUT); if (!adapter->ptp_tx_skb) return; if (!test_bit(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state)) return; /* If we haven't received a timestamp within the timeout, it is * reasonable to assume that it will never occur, so we can unlock the * timestamp bit when this occurs. 
*/ if (timeout) { cancel_work_sync(&adapter->ptp_tx_work); ixgbe_ptp_clear_tx_timestamp(adapter); adapter->tx_hwtstamp_timeouts++; e_warn(drv, "clearing Tx timestamp hang\n"); } } /** * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp * @adapter: the private adapter struct * * if the timestamp is valid, we convert it into the timecounter ns * value, then store that result into the shhwtstamps structure which * is passed up the network stack */ static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter) { struct sk_buff *skb = adapter->ptp_tx_skb; struct ixgbe_hw *hw = &adapter->hw; struct skb_shared_hwtstamps shhwtstamps; u64 regval = 0; regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32; ixgbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval); /* Handle cleanup of the ptp_tx_skb ourselves, and unlock the state * bit prior to notifying the stack via skb_tstamp_tx(). This prevents * well behaved applications from attempting to timestamp again prior * to the lock bit being clear. */ adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); /* Notify the stack and then free the skb after we've unlocked */ skb_tstamp_tx(skb, &shhwtstamps); dev_kfree_skb_any(skb); } /** * ixgbe_ptp_tx_hwtstamp_work * @work: pointer to the work struct * * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware * timestamp has been taken for the current skb. It is necessary, because the * descriptor's "done" bit does not correlate with the timestamp event. */ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) { struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter, ptp_tx_work); struct ixgbe_hw *hw = &adapter->hw; bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + IXGBE_PTP_TX_TIMEOUT); u32 tsynctxctl; /* we have to have a valid skb to poll for a timestamp */ if (!adapter->ptp_tx_skb) { ixgbe_ptp_clear_tx_timestamp(adapter); return; } /* stop polling once we have a valid timestamp */ tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID) { ixgbe_ptp_tx_hwtstamp(adapter); return; } if (timeout) { ixgbe_ptp_clear_tx_timestamp(adapter); adapter->tx_hwtstamp_timeouts++; e_warn(drv, "clearing Tx Timestamp hang\n"); } else { /* reschedule to keep checking if it's not available yet */ schedule_work(&adapter->ptp_tx_work); } } /** * ixgbe_ptp_rx_pktstamp - utility function to get RX time stamp from buffer * @q_vector: structure containing interrupt and ring information * @skb: the packet * * This function will be called by the Rx routine of the timestamp for this * packet is stored in the buffer. The value is stored in little endian format * starting at the end of the packet data. */ void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *q_vector, struct sk_buff *skb) { __le64 regval; /* copy the bits out of the skb, and then trim the skb length */ skb_copy_bits(skb, skb->len - IXGBE_TS_HDR_LEN, &regval, IXGBE_TS_HDR_LEN); __pskb_trim(skb, skb->len - IXGBE_TS_HDR_LEN); /* The timestamp is recorded in little endian format, and is stored at * the end of the packet. 
* * DWORD: N N + 1 N + 2 * Field: End of Packet SYSTIMH SYSTIML */ ixgbe_ptp_convert_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb), le64_to_cpu(regval)); } /** * ixgbe_ptp_rx_rgtstamp - utility function which checks for RX time stamp * @q_vector: structure containing interrupt and ring information * @skb: particular skb to send timestamp with * * if the timestamp is valid, we convert it into the timecounter ns * value, then store that result into the shhwtstamps structure which * is passed up the network stack */ void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector, struct sk_buff *skb) { struct ixgbe_adapter *adapter; struct ixgbe_hw *hw; u64 regval = 0; u32 tsyncrxctl; /* we cannot process timestamps on a ring without a q_vector */ if (!q_vector || !q_vector->adapter) return; adapter = q_vector->adapter; hw = &adapter->hw; /* Read the tsyncrxctl register afterwards in order to prevent taking an * I/O hit on every packet. */ tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) return; regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; ixgbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); } /** * ixgbe_ptp_get_ts_config - get current hardware timestamping configuration * @adapter: pointer to adapter structure * @ifr: ioctl data * * This function returns the current timestamping settings. Rather than * attempt to deconstruct registers to fill in the values, simply keep a copy * of the old settings around, and return a copy when requested. */ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) { struct hwtstamp_config *config = &adapter->tstamp_config; return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? -EFAULT : 0; } /** * ixgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode * @adapter: the private ixgbe adapter structure * @config: the hwtstamp configuration requested * * Outgoing time stamping can be enabled and disabled. Play nice and * disable it when requested, although it shouldn't cause any overhead * when no packet needs it. At most one packet in the queue may be * marked for time stamping, otherwise it would be impossible to tell * for sure to which packet the hardware time stamp belongs. * * Incoming time stamping has to be configured via the hardware * filters. Not all combinations are supported, in particular event * type has to be specified. Matching the kind of event packet is * not supported, with the exception of "all V2 events regardless of * level 2 or 4". * * Since hardware always timestamps Path delay packets when timestamping V2 * packets, regardless of the type specified in the register, only use V2 * Event mode. This more accurately tells the user what the hardware is going * to do anyways. * * Note: this may modify the hwtstamp configuration towards a more general * mode, if required to support the specifically requested mode. 
*/ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, struct hwtstamp_config *config) { struct ixgbe_hw *hw = &adapter->hw; u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED; u32 tsync_rx_mtrl = PTP_EV_PORT << 16; u32 aflags = adapter->flags; bool is_l2 = false; u32 regval; switch (config->tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; break; case HWTSTAMP_TX_ON: break; default: return -ERANGE; } switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: tsync_rx_ctl = 0; tsync_rx_mtrl = 0; aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; is_l2 = true; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_ALL: /* The X550 controller is capable of timestamping all packets, * which allows it to accept any filter. */ if (hw->mac.type >= ixgbe_mac_X550) { tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL; config->rx_filter = HWTSTAMP_FILTER_ALL; aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; break; } fallthrough; default: /* * register RXMTRL must be set in order to do V1 packets, * therefore it is not possible to time stamp both V1 Sync and * Delay_Req messages and hardware does not support * timestamping all packets => return error */ config->rx_filter = HWTSTAMP_FILTER_NONE; return -ERANGE; } if (hw->mac.type == ixgbe_mac_82598EB) { adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); if (tsync_rx_ctl | tsync_tx_ctl) return -ERANGE; return 0; } /* Per-packet timestamping only works if the filter is set to all * packets. Since this is desired, always timestamp all packets as long * as any Rx filter was configured. */ switch (hw->mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: /* enable timestamping all packets only if at least some * packets were requested. 
Otherwise, play nice and disable * timestamping */ if (config->rx_filter == HWTSTAMP_FILTER_NONE) break; tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED | IXGBE_TSYNCRXCTL_TYPE_ALL | IXGBE_TSYNCRXCTL_TSIP_UT_EN; config->rx_filter = HWTSTAMP_FILTER_ALL; aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER; is_l2 = true; break; default: break; } /* define ethertype filter for timestamping L2 packets */ if (is_l2) IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), (IXGBE_ETQF_FILTER_EN | /* enable filter */ IXGBE_ETQF_1588 | /* enable timestamping */ ETH_P_1588)); /* 1588 eth protocol type */ else IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); /* enable/disable TX */ regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); regval &= ~IXGBE_TSYNCTXCTL_ENABLED; regval |= tsync_tx_ctl; IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, regval); /* enable/disable RX */ regval = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); regval &= ~(IXGBE_TSYNCRXCTL_ENABLED | IXGBE_TSYNCRXCTL_TYPE_MASK); regval |= tsync_rx_ctl; IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, regval); /* define which PTP packets are time stamped */ IXGBE_WRITE_REG(hw, IXGBE_RXMTRL, tsync_rx_mtrl); IXGBE_WRITE_FLUSH(hw); /* configure adapter flags only when HW is actually configured */ adapter->flags = aflags; /* clear TX/RX time stamp registers, just to be sure */ ixgbe_ptp_clear_tx_timestamp(adapter); IXGBE_READ_REG(hw, IXGBE_RXSTMPH); return 0; } /** * ixgbe_ptp_set_ts_config - user entry point for timestamp mode * @adapter: pointer to adapter struct * @ifr: ioctl data * * Set hardware to requested mode. If unsupported, return an error with no * changes. Otherwise, store the mode for future reference. */ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) { struct hwtstamp_config config; int err; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; err = ixgbe_ptp_set_timestamp_mode(adapter, &config); if (err) return err; /* save these settings for future reference */ memcpy(&adapter->tstamp_config, &config, sizeof(adapter->tstamp_config)); return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; } static void ixgbe_ptp_link_speed_adjust(struct ixgbe_adapter *adapter, u32 *shift, u32 *incval) { /** * Scale the NIC cycle counter by a large factor so that * relatively small corrections to the frequency can be added * or subtracted. The drawbacks of a large factor include * (a) the clock register overflows more quickly, (b) the cycle * counter structure must be able to convert the systime value * to nanoseconds using only a multiplier and a right-shift, * and (c) the value must fit within the timinca register space * => math based on internal DMA clock rate and available bits * * Note that when there is no link, internal DMA clock is same as when * link speed is 10Gb. 
Set the registers correctly even when link is * down to preserve the clock setting */ switch (adapter->link_speed) { case IXGBE_LINK_SPEED_100_FULL: *shift = IXGBE_INCVAL_SHIFT_100; *incval = IXGBE_INCVAL_100; break; case IXGBE_LINK_SPEED_1GB_FULL: *shift = IXGBE_INCVAL_SHIFT_1GB; *incval = IXGBE_INCVAL_1GB; break; case IXGBE_LINK_SPEED_10GB_FULL: default: *shift = IXGBE_INCVAL_SHIFT_10GB; *incval = IXGBE_INCVAL_10GB; break; } } /** * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw * @adapter: pointer to the adapter structure * * This function should be called to set the proper values for the TIMINCA * register and tell the cyclecounter structure what the tick rate of SYSTIME * is. It does not directly modify SYSTIME registers or the timecounter * structure. It should be called whenever a new TIMINCA value is necessary, * such as during initialization or when the link speed changes. */ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct cyclecounter cc; unsigned long flags; u32 incval = 0; u32 fuse0 = 0; /* For some of the boards below this mask is technically incorrect. * The timestamp mask overflows at approximately 61bits. However the * particular hardware does not overflow on an even bitmask value. * Instead, it overflows due to conversion of upper 32bits billions of * cycles. Timecounters are not really intended for this purpose so * they do not properly function if the overflow point isn't 2^N-1. * However, the actual SYSTIME values in question take ~138 years to * overflow. In practice this means they won't actually overflow. A * proper fix to this problem would require modification of the * timecounter delta calculations. */ cc.mask = CLOCKSOURCE_MASK(64); cc.mult = 1; cc.shift = 0; switch (hw->mac.type) { case ixgbe_mac_X550EM_x: /* SYSTIME assumes X550EM_x board frequency is 300Mhz, and is * designed to represent seconds and nanoseconds when this is * the case. However, some revisions of hardware have a 400Mhz * clock and we have to compensate for this frequency * variation using corrected mult and shift values. */ fuse0 = IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)); if (!(fuse0 & IXGBE_FUSES0_300MHZ)) { cc.mult = 3; cc.shift = 2; } fallthrough; case ixgbe_mac_x550em_a: case ixgbe_mac_X550: cc.read = ixgbe_ptp_read_X550; break; case ixgbe_mac_X540: cc.read = ixgbe_ptp_read_82599; ixgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); break; case ixgbe_mac_82599EB: cc.read = ixgbe_ptp_read_82599; ixgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); incval >>= IXGBE_INCVAL_SHIFT_82599; cc.shift -= IXGBE_INCVAL_SHIFT_82599; IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, BIT(IXGBE_INCPER_SHIFT_82599) | incval); break; default: /* other devices aren't supported */ return; } /* update the base incval used to calculate frequency adjustment */ WRITE_ONCE(adapter->base_incval, incval); smp_mb(); /* need lock to prevent incorrect read while modifying cyclecounter */ spin_lock_irqsave(&adapter->tmreg_lock, flags); memcpy(&adapter->hw_cc, &cc, sizeof(adapter->hw_cc)); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); } /** * ixgbe_ptp_init_systime - Initialize SYSTIME registers * @adapter: the ixgbe private board structure * * Initialize and start the SYSTIME registers. 
*/ static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 tsauxc; switch (hw->mac.type) { case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: case ixgbe_mac_X550: tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); /* Reset SYSTIME registers to 0 */ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0); IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0); IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0); /* Reset interrupt settings */ IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS); IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC); /* Activate the SYSTIME counter */ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME); break; case ixgbe_mac_X540: case ixgbe_mac_82599EB: /* Reset SYSTIME registers to 0 */ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0); IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0); break; default: /* Other devices aren't supported */ return; } IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_ptp_reset * @adapter: the ixgbe private board structure * * When the MAC resets, all the hardware bits for timesync are reset. This * function is used to re-enable the device for PTP based on current settings. * We do lose the current clock time, so just reset the cyclecounter to the * system real clock time. * * This function will maintain hwtstamp_config settings, and resets the SDP * output if it was enabled. */ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; unsigned long flags; /* reset the hardware timestamping mode */ ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); /* 82598 does not support PTP */ if (hw->mac.type == ixgbe_mac_82598EB) return; ixgbe_ptp_start_cyclecounter(adapter); ixgbe_ptp_init_systime(adapter); spin_lock_irqsave(&adapter->tmreg_lock, flags); timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ktime_to_ns(ktime_get_real())); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); adapter->last_overflow_check = jiffies; /* Now that the shift has been calculated and the systime * registers reset, (re-)enable the Clock out feature */ if (adapter->ptp_setup_sdp) adapter->ptp_setup_sdp(adapter); } /** * ixgbe_ptp_create_clock * @adapter: the ixgbe private adapter structure * * This function performs setup of the user entry point function table and * initializes the PTP clock device, which is used to access the clock-like * features of the PTP core. It will be called by ixgbe_ptp_init, and may * reuse a previously initialized clock (such as during a suspend/resume * cycle). 
*/ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; long err; /* do nothing if we already have a clock device */ if (!IS_ERR_OR_NULL(adapter->ptp_clock)) return 0; switch (adapter->hw.mac.type) { case ixgbe_mac_X540: snprintf(adapter->ptp_caps.name, sizeof(adapter->ptp_caps.name), "%s", netdev->name); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 250000000; adapter->ptp_caps.n_alarm = 0; adapter->ptp_caps.n_ext_ts = 0; adapter->ptp_caps.n_per_out = 0; adapter->ptp_caps.pps = 1; adapter->ptp_caps.adjfine = ixgbe_ptp_adjfine_82599; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_X540; break; case ixgbe_mac_82599EB: snprintf(adapter->ptp_caps.name, sizeof(adapter->ptp_caps.name), "%s", netdev->name); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 250000000; adapter->ptp_caps.n_alarm = 0; adapter->ptp_caps.n_ext_ts = 0; adapter->ptp_caps.n_per_out = 0; adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfine = ixgbe_ptp_adjfine_82599; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; break; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 30000000; adapter->ptp_caps.n_alarm = 0; adapter->ptp_caps.n_ext_ts = 0; adapter->ptp_caps.n_per_out = 0; adapter->ptp_caps.pps = 1; adapter->ptp_caps.adjfine = ixgbe_ptp_adjfine_X550; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_X550; break; default: adapter->ptp_clock = NULL; adapter->ptp_setup_sdp = NULL; return -EOPNOTSUPP; } adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, &adapter->pdev->dev); if (IS_ERR(adapter->ptp_clock)) { err = PTR_ERR(adapter->ptp_clock); adapter->ptp_clock = NULL; e_dev_err("ptp_clock_register failed\n"); return err; } else if (adapter->ptp_clock) e_dev_info("registered PHC device on %s\n", netdev->name); /* set default timestamp mode to disabled here. We do this in * create_clock instead of init, because we don't want to override the * previous settings during a resume cycle. */ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; return 0; } /** * ixgbe_ptp_init * @adapter: the ixgbe private adapter structure * * This function performs the required steps for enabling PTP * support. If PTP support has already been loaded it simply calls the * cyclecounter init routine and exits. 
*/ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) { /* initialize the spin lock first since we can't control when a user * will call the entry functions once we have initialized the clock * device */ spin_lock_init(&adapter->tmreg_lock); /* obtain a PTP device, or re-use an existing device */ if (ixgbe_ptp_create_clock(adapter)) return; /* we have a clock so we can initialize work now */ INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work); /* reset the PTP related hardware bits */ ixgbe_ptp_reset(adapter); /* enter the IXGBE_PTP_RUNNING state */ set_bit(__IXGBE_PTP_RUNNING, &adapter->state); return; } /** * ixgbe_ptp_suspend - stop PTP work items * @adapter: pointer to adapter struct * * this function suspends PTP activity, and prevents more PTP work from being * generated, but does not destroy the PTP clock device. */ void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter) { /* Leave the IXGBE_PTP_RUNNING state. */ if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state)) return; adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; if (adapter->ptp_setup_sdp) adapter->ptp_setup_sdp(adapter); /* ensure that we cancel any pending PTP Tx work item in progress */ cancel_work_sync(&adapter->ptp_tx_work); ixgbe_ptp_clear_tx_timestamp(adapter); } /** * ixgbe_ptp_stop - close the PTP device * @adapter: pointer to adapter struct * * completely destroy the PTP device, should only be called when the device is * being fully closed. */ void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) { /* first, suspend PTP activity */ ixgbe_ptp_suspend(adapter); /* disable the PTP clock device */ if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); adapter->ptp_clock = NULL; e_dev_info("removed PHC on %s\n", adapter->netdev->name); } }
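/* Standalone arithmetic check, illustrative only and not part of ixgbe_ptp.c.
 * It re-derives the constants described in the comment blocks above (assuming
 * only the stated 6.4 ns / 12.5 ns base periods), so the relationship between
 * oscillator period, TIMINCA increment and the cyclecounter mult/shift pair
 * can be verified by compiling and running it on any host.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 82599/X540: ns per SYSTIME tick = incval / 2^shift. The comments above
	 * quote 6.4 ns, 64 ns and 640 ns for 10Gb, 1Gb and 100Mb link. */
	printf("10G : %.1f ns\n", (double)0x66666666 / (1u << 28));
	printf("1G  : %.1f ns\n", (double)0x40000000 / (1u << 24));
	printf("100M: %.1f ns\n", (double)0x50000000 / (1u << 21));

	/* X550: the 12.5 ns base period expressed in 2^-32 ns units should equal
	 * IXGBE_X550_BASE_PERIOD (0xC80000000). */
	uint64_t base = (uint64_t)(12.5 * 4294967296.0);
	printf("X550 base period: %#llx\n", (unsigned long long)base);

	/* X550: the max adjustment is just under +/-0.5 ns per 12.5 ns cycle,
	 * i.e. 0.5 / 12.5 = 40000 ppm, matching the 39999999 ppb quoted above
	 * (one less, to stay below the TIMINCA limit). */
	printf("X550 max adj: %.0f ppb\n", 0.5 / 12.5 * 1e9 - 1);

	/* X550EM_x 400 MHz parts: SYSTIME assumes a 300 MHz clock, so raw counts
	 * are scaled by mult/2^shift = 3/4 before being treated as nanoseconds. */
	printf("400 MHz scale: %u/%u = %.2f\n", 3u, 1u << 2, 3.0 / (1u << 2));
	return 0;
}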
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
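ixgbe_ptp_set_ts_config() above is reached through the standard SIOCSHWTSTAMP ioctl. The minimal user-space sketch below shows that request path; it uses only the generic Linux timestamping API (linux/net_tstamp.h), nothing ixgbe-specific, and the default interface name "eth0" is an assumption for illustration. Because the driver may upgrade the requested filter (for example to HWTSTAMP_FILTER_ALL on X550) and copies the resulting configuration back on success, the config is printed after the call.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0"; /* interface name is an assumption */
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* Request hardware timestamping; the driver writes back the mode it
	 * actually programmed, which may be more general than requested. */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}

	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}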
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/netdevice.h> #include "ixgbe.h" #include "ixgbe_common.h" #include "ixgbe_phy.h" static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, u16 count); static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); static void ixgbe_release_eeprom(struct ixgbe_hw *hw); static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, u16 offset); static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw); /* Base table for registers values that change by MAC */ const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(8259X) }; /** * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow * control * @hw: pointer to hardware structure * * There are several phys that do not support autoneg flow control. This * function check the device id to see if the associated phy supports * autoneg flow control. **/ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) { bool supported = false; ixgbe_link_speed speed; bool link_up; switch (hw->phy.media_type) { case ixgbe_media_type_fiber: /* flow control autoneg black list */ switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_SFP: case IXGBE_DEV_ID_X550EM_A_SFP_N: supported = false; break; default: hw->mac.ops.check_link(hw, &speed, &link_up, false); /* if link is down, assume supported */ if (link_up) supported = speed == IXGBE_LINK_SPEED_1GB_FULL; else supported = true; } break; case ixgbe_media_type_backplane: if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) supported = false; else supported = true; break; case ixgbe_media_type_copper: /* only some copper devices support flow control autoneg */ switch (hw->device_id) { case IXGBE_DEV_ID_82599_T3_LOM: case IXGBE_DEV_ID_X540T: case IXGBE_DEV_ID_X540T1: case IXGBE_DEV_ID_X550T: case IXGBE_DEV_ID_X550T1: case IXGBE_DEV_ID_X550EM_X_10G_T: case IXGBE_DEV_ID_X550EM_A_10G_T: case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: supported = true; break; default: break; } break; default: break; } if (!supported) hw_dbg(hw, "Device %x does not support flow control autoneg\n", hw->device_id); return supported; } /** * ixgbe_setup_fc_generic - Set up flow control * @hw: pointer to hardware structure * * Called at init time to set up flow control. **/ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) { s32 ret_val = 0; u32 reg = 0, reg_bp = 0; u16 reg_cu = 0; bool locked = false; /* * Validate the requested mode. Strict IEEE mode does not allow * ixgbe_fc_rx_pause because it will cause us to fail at UNH. 
*/ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); return IXGBE_ERR_INVALID_LINK_SETTINGS; } /* * 10gig parts do not have a word in the EEPROM to determine the * default flow control setting, so we explicitly set it to full. */ if (hw->fc.requested_mode == ixgbe_fc_default) hw->fc.requested_mode = ixgbe_fc_full; /* * Set up the 1G and 10G flow control advertisement registers so the * HW will be able to do fc autoneg once the cable is plugged in. If * we link at 10G, the 1G advertisement is harmless and vice versa. */ switch (hw->phy.media_type) { case ixgbe_media_type_backplane: /* some MAC's need RMW protection on AUTOC */ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp); if (ret_val) return ret_val; fallthrough; /* only backplane uses autoc */ case ixgbe_media_type_fiber: reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); break; case ixgbe_media_type_copper: hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &reg_cu); break; default: break; } /* * The possible values of fc.requested_mode are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames, * but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames but * we do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. * other: Invalid. */ switch (hw->fc.requested_mode) { case ixgbe_fc_none: /* Flow control completely disabled by software override. */ reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); if (hw->phy.media_type == ixgbe_media_type_backplane) reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE); else if (hw->phy.media_type == ixgbe_media_type_copper) reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); break; case ixgbe_fc_tx_pause: /* * Tx Flow control is enabled, and Rx Flow control is * disabled by software override. */ reg |= IXGBE_PCS1GANA_ASM_PAUSE; reg &= ~IXGBE_PCS1GANA_SYM_PAUSE; if (hw->phy.media_type == ixgbe_media_type_backplane) { reg_bp |= IXGBE_AUTOC_ASM_PAUSE; reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE; } else if (hw->phy.media_type == ixgbe_media_type_copper) { reg_cu |= IXGBE_TAF_ASM_PAUSE; reg_cu &= ~IXGBE_TAF_SYM_PAUSE; } break; case ixgbe_fc_rx_pause: /* * Rx Flow control is enabled and Tx Flow control is * disabled by software override. Since there really * isn't a way to advertise that we are capable of RX * Pause ONLY, we will advertise that we support both * symmetric and asymmetric Rx PAUSE, as such we fall * through to the fc_full statement. Later, we will * disable the adapter's ability to send PAUSE frames. */ case ixgbe_fc_full: /* Flow control (both Rx and Tx) is enabled by SW override. */ reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE; if (hw->phy.media_type == ixgbe_media_type_backplane) reg_bp |= IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE; else if (hw->phy.media_type == ixgbe_media_type_copper) reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE; break; default: hw_dbg(hw, "Flow control param set incorrectly\n"); return IXGBE_ERR_CONFIG; } if (hw->mac.type != ixgbe_mac_X540) { /* * Enable auto-negotiation between the MAC & PHY; * the MAC will advertise clause 37 flow control. 
*/ IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); /* Disable AN timeout */ if (hw->fc.strict_ieee) reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); } /* * AUTOC restart handles negotiation of 1G and 10G on backplane * and copper. There is no need to set the PCS1GCTL register. * */ if (hw->phy.media_type == ixgbe_media_type_backplane) { /* Need the SW/FW semaphore around AUTOC writes if 82599 and * LESM is on, likewise reset_pipeline requries the lock as * it also writes AUTOC. */ ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked); if (ret_val) return ret_val; } else if ((hw->phy.media_type == ixgbe_media_type_copper) && ixgbe_device_supports_autoneg_fc(hw)) { hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, reg_cu); } hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); return ret_val; } /** * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure * * Starts the hardware by filling the bus info structure and media type, clears * all on chip counters, initializes receive address registers, multicast * table, VLAN filter table, calls routine to set up link and flow control * settings, and leaves transmit and receive units disabled and uninitialized **/ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) { s32 ret_val; u32 ctrl_ext; u16 device_caps; /* Set the media type */ hw->phy.media_type = hw->mac.ops.get_media_type(hw); /* Identify the PHY */ hw->phy.ops.identify(hw); /* Clear the VLAN filter table */ hw->mac.ops.clear_vfta(hw); /* Clear statistics registers */ hw->mac.ops.clear_hw_cntrs(hw); /* Set No Snoop Disable */ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); IXGBE_WRITE_FLUSH(hw); /* Setup flow control if method for doing so */ if (hw->mac.ops.setup_fc) { ret_val = hw->mac.ops.setup_fc(hw); if (ret_val) return ret_val; } /* Cashe bit indicating need for crosstalk fix */ switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: hw->mac.ops.get_device_caps(hw, &device_caps); if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) hw->need_crosstalk_fix = false; else hw->need_crosstalk_fix = true; break; default: hw->need_crosstalk_fix = false; break; } /* Clear adapter stopped flag */ hw->adapter_stopped = false; return 0; } /** * ixgbe_start_hw_gen2 - Init sequence for common device family * @hw: pointer to hw structure * * Performs the init sequence common to the second generation * of 10 GbE devices. 
* Devices in the second generation: * 82599 * X540 **/ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) { u32 i; /* Clear the rate limiters */ for (i = 0; i < hw->mac.max_tx_queues; i++) { IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); } IXGBE_WRITE_FLUSH(hw); return 0; } /** * ixgbe_init_hw_generic - Generic hardware initialization * @hw: pointer to hardware structure * * Initialize the hardware by resetting the hardware, filling the bus info * structure and media type, clears all on chip counters, initializes receive * address registers, multicast table, VLAN filter table, calls routine to set * up link and flow control settings, and leaves transmit and receive units * disabled and uninitialized **/ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) { s32 status; /* Reset the hardware */ status = hw->mac.ops.reset_hw(hw); if (status == 0) { /* Start the HW */ status = hw->mac.ops.start_hw(hw); } /* Initialize the LED link active for LED blink support */ if (hw->mac.ops.init_led_link_act) hw->mac.ops.init_led_link_act(hw); return status; } /** * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters * @hw: pointer to hardware structure * * Clears all hardware statistics counters by reading them from the hardware * Statistics counters are clear on read. **/ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) { u16 i = 0; IXGBE_READ_REG(hw, IXGBE_CRCERRS); IXGBE_READ_REG(hw, IXGBE_ILLERRC); IXGBE_READ_REG(hw, IXGBE_ERRBC); IXGBE_READ_REG(hw, IXGBE_MSPDC); for (i = 0; i < 8; i++) IXGBE_READ_REG(hw, IXGBE_MPC(i)); IXGBE_READ_REG(hw, IXGBE_MLFC); IXGBE_READ_REG(hw, IXGBE_MRFC); IXGBE_READ_REG(hw, IXGBE_RLEC); IXGBE_READ_REG(hw, IXGBE_LXONTXC); IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); if (hw->mac.type >= ixgbe_mac_82599EB) { IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); } else { IXGBE_READ_REG(hw, IXGBE_LXONRXC); IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); } for (i = 0; i < 8; i++) { IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); if (hw->mac.type >= ixgbe_mac_82599EB) { IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); } else { IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); } } if (hw->mac.type >= ixgbe_mac_82599EB) for (i = 0; i < 8; i++) IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); IXGBE_READ_REG(hw, IXGBE_PRC64); IXGBE_READ_REG(hw, IXGBE_PRC127); IXGBE_READ_REG(hw, IXGBE_PRC255); IXGBE_READ_REG(hw, IXGBE_PRC511); IXGBE_READ_REG(hw, IXGBE_PRC1023); IXGBE_READ_REG(hw, IXGBE_PRC1522); IXGBE_READ_REG(hw, IXGBE_GPRC); IXGBE_READ_REG(hw, IXGBE_BPRC); IXGBE_READ_REG(hw, IXGBE_MPRC); IXGBE_READ_REG(hw, IXGBE_GPTC); IXGBE_READ_REG(hw, IXGBE_GORCL); IXGBE_READ_REG(hw, IXGBE_GORCH); IXGBE_READ_REG(hw, IXGBE_GOTCL); IXGBE_READ_REG(hw, IXGBE_GOTCH); if (hw->mac.type == ixgbe_mac_82598EB) for (i = 0; i < 8; i++) IXGBE_READ_REG(hw, IXGBE_RNBC(i)); IXGBE_READ_REG(hw, IXGBE_RUC); IXGBE_READ_REG(hw, IXGBE_RFC); IXGBE_READ_REG(hw, IXGBE_ROC); IXGBE_READ_REG(hw, IXGBE_RJC); IXGBE_READ_REG(hw, IXGBE_MNGPRC); IXGBE_READ_REG(hw, IXGBE_MNGPDC); IXGBE_READ_REG(hw, IXGBE_MNGPTC); IXGBE_READ_REG(hw, IXGBE_TORL); IXGBE_READ_REG(hw, IXGBE_TORH); IXGBE_READ_REG(hw, IXGBE_TPR); IXGBE_READ_REG(hw, IXGBE_TPT); IXGBE_READ_REG(hw, IXGBE_PTC64); IXGBE_READ_REG(hw, IXGBE_PTC127); IXGBE_READ_REG(hw, IXGBE_PTC255); IXGBE_READ_REG(hw, IXGBE_PTC511); IXGBE_READ_REG(hw, IXGBE_PTC1023); IXGBE_READ_REG(hw, IXGBE_PTC1522); IXGBE_READ_REG(hw, IXGBE_MPTC); IXGBE_READ_REG(hw, IXGBE_BPTC); for (i = 0; i 
< 16; i++) { IXGBE_READ_REG(hw, IXGBE_QPRC(i)); IXGBE_READ_REG(hw, IXGBE_QPTC(i)); if (hw->mac.type >= ixgbe_mac_82599EB) { IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); } else { IXGBE_READ_REG(hw, IXGBE_QBRC(i)); IXGBE_READ_REG(hw, IXGBE_QBTC(i)); } } if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { if (hw->phy.id == 0) hw->phy.ops.identify(hw); hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i); hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i); hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i); hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i); } return 0; } /** * ixgbe_read_pba_string_generic - Reads part number string from EEPROM * @hw: pointer to hardware structure * @pba_num: stores the part number string from the EEPROM * @pba_num_size: part number string buffer length * * Reads the part number string from the EEPROM. **/ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size) { s32 ret_val; u16 data; u16 pba_ptr; u16 offset; u16 length; if (pba_num == NULL) { hw_dbg(hw, "PBA string buffer was null\n"); return IXGBE_ERR_INVALID_ARGUMENT; } ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); if (ret_val) { hw_dbg(hw, "NVM Read Error\n"); return ret_val; } ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); if (ret_val) { hw_dbg(hw, "NVM Read Error\n"); return ret_val; } /* * if data is not ptr guard the PBA must be in legacy format which * means pba_ptr is actually our second data word for the PBA number * and we can decode it into an ascii string */ if (data != IXGBE_PBANUM_PTR_GUARD) { hw_dbg(hw, "NVM PBA number is not stored as string\n"); /* we will need 11 characters to store the PBA */ if (pba_num_size < 11) { hw_dbg(hw, "PBA string buffer too small\n"); return IXGBE_ERR_NO_SPACE; } /* extract hex string from data and pba_ptr */ pba_num[0] = (data >> 12) & 0xF; pba_num[1] = (data >> 8) & 0xF; pba_num[2] = (data >> 4) & 0xF; pba_num[3] = data & 0xF; pba_num[4] = (pba_ptr >> 12) & 0xF; pba_num[5] = (pba_ptr >> 8) & 0xF; pba_num[6] = '-'; pba_num[7] = 0; pba_num[8] = (pba_ptr >> 4) & 0xF; pba_num[9] = pba_ptr & 0xF; /* put a null character on the end of our string */ pba_num[10] = '\0'; /* switch all the data but the '-' to hex char */ for (offset = 0; offset < 10; offset++) { if (pba_num[offset] < 0xA) pba_num[offset] += '0'; else if (pba_num[offset] < 0x10) pba_num[offset] += 'A' - 0xA; } return 0; } ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); if (ret_val) { hw_dbg(hw, "NVM Read Error\n"); return ret_val; } if (length == 0xFFFF || length == 0) { hw_dbg(hw, "NVM PBA number section invalid length\n"); return IXGBE_ERR_PBA_SECTION; } /* check if pba_num buffer is big enough */ if (pba_num_size < (((u32)length * 2) - 1)) { hw_dbg(hw, "PBA string buffer too small\n"); return IXGBE_ERR_NO_SPACE; } /* trim pba length from start of string */ pba_ptr++; length--; for (offset = 0; offset < length; offset++) { ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); if (ret_val) { hw_dbg(hw, "NVM Read Error\n"); return ret_val; } pba_num[offset * 2] = (u8)(data >> 8); pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); } pba_num[offset * 2] = '\0'; return 0; } /** * ixgbe_get_mac_addr_generic - Generic get MAC address * @hw: pointer to hardware structure * @mac_addr: Adapter MAC address * * Reads the adapter's MAC address from first Receive 
Address Register (RAR0) * A reset of the adapter must be performed prior to calling this function * in order for the MAC address to have been loaded from the EEPROM into RAR0 **/ s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) { u32 rar_high; u32 rar_low; u16 i; rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); for (i = 0; i < 4; i++) mac_addr[i] = (u8)(rar_low >> (i*8)); for (i = 0; i < 2; i++) mac_addr[i+4] = (u8)(rar_high >> (i*8)); return 0; } enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status) { switch (link_status & IXGBE_PCI_LINK_WIDTH) { case IXGBE_PCI_LINK_WIDTH_1: return ixgbe_bus_width_pcie_x1; case IXGBE_PCI_LINK_WIDTH_2: return ixgbe_bus_width_pcie_x2; case IXGBE_PCI_LINK_WIDTH_4: return ixgbe_bus_width_pcie_x4; case IXGBE_PCI_LINK_WIDTH_8: return ixgbe_bus_width_pcie_x8; default: return ixgbe_bus_width_unknown; } } enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status) { switch (link_status & IXGBE_PCI_LINK_SPEED) { case IXGBE_PCI_LINK_SPEED_2500: return ixgbe_bus_speed_2500; case IXGBE_PCI_LINK_SPEED_5000: return ixgbe_bus_speed_5000; case IXGBE_PCI_LINK_SPEED_8000: return ixgbe_bus_speed_8000; default: return ixgbe_bus_speed_unknown; } } /** * ixgbe_get_bus_info_generic - Generic set PCI bus info * @hw: pointer to hardware structure * * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure **/ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) { u16 link_status; hw->bus.type = ixgbe_bus_type_pci_express; /* Get the negotiated link width and speed from PCI config space */ link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS); hw->bus.width = ixgbe_convert_bus_width(link_status); hw->bus.speed = ixgbe_convert_bus_speed(link_status); hw->mac.ops.set_lan_id(hw); return 0; } /** * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices * @hw: pointer to the HW structure * * Determines the LAN function id by reading memory-mapped registers * and swaps the port value if requested. **/ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) { struct ixgbe_bus_info *bus = &hw->bus; u16 ee_ctrl_4; u32 reg; reg = IXGBE_READ_REG(hw, IXGBE_STATUS); bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; bus->lan_id = bus->func; /* check for a port swap */ reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); if (reg & IXGBE_FACTPS_LFS) bus->func ^= 0x1; /* Get MAC instance from EEPROM for configuring CS4227 */ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) { hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4); bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >> IXGBE_EE_CTRL_4_INST_ID_SHIFT; } } /** * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units * @hw: pointer to hardware structure * * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, * disables transmit and receive units. The adapter_stopped flag is used by * the shared code and drivers to determine if the adapter is in a stopped * state and should not touch the hardware. 
**/ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) { u32 reg_val; u16 i; /* * Set the adapter_stopped flag so other driver functions stop touching * the hardware */ hw->adapter_stopped = true; /* Disable the receive unit */ hw->mac.ops.disable_rx(hw); /* Clear interrupt mask to stop interrupts from being generated */ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); /* Clear any pending interrupts, flush previous writes */ IXGBE_READ_REG(hw, IXGBE_EICR); /* Disable the transmit unit. Each queue must be disabled. */ for (i = 0; i < hw->mac.max_tx_queues; i++) IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH); /* Disable the receive unit by stopping each queue */ for (i = 0; i < hw->mac.max_rx_queues; i++) { reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); reg_val &= ~IXGBE_RXDCTL_ENABLE; reg_val |= IXGBE_RXDCTL_SWFLSH; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); } /* flush all queues disables */ IXGBE_WRITE_FLUSH(hw); usleep_range(1000, 2000); /* * Prevent the PCI-E bus from hanging by disabling PCI-E primary * access and verify no pending requests */ return ixgbe_disable_pcie_primary(hw); } /** * ixgbe_init_led_link_act_generic - Store the LED index link/activity. * @hw: pointer to hardware structure * * Store the index for the link active LED. This will be used to support * blinking the LED. **/ s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; u32 led_reg, led_mode; u16 i; led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); /* Get LED link active from the LEDCTL register */ for (i = 0; i < 4; i++) { led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i); if ((led_mode & IXGBE_LED_MODE_MASK_BASE) == IXGBE_LED_LINK_ACTIVE) { mac->led_link_act = i; return 0; } } /* If LEDCTL register does not have the LED link active set, then use * known MAC defaults. */ switch (hw->mac.type) { case ixgbe_mac_x550em_a: mac->led_link_act = 0; break; case ixgbe_mac_X550EM_x: mac->led_link_act = 1; break; default: mac->led_link_act = 2; } return 0; } /** * ixgbe_led_on_generic - Turns on the software controllable LEDs. * @hw: pointer to hardware structure * @index: led number to turn on **/ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) { u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); if (index > 3) return IXGBE_ERR_PARAM; /* To turn on the LED, set mode to ON. */ led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); return 0; } /** * ixgbe_led_off_generic - Turns off the software controllable LEDs. * @hw: pointer to hardware structure * @index: led number to turn off **/ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) { u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); if (index > 3) return IXGBE_ERR_PARAM; /* To turn off the LED, set mode to OFF. */ led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); return 0; } /** * ixgbe_init_eeprom_params_generic - Initialize EEPROM params * @hw: pointer to hardware structure * * Initializes the EEPROM parameters ixgbe_eeprom_info within the * ixgbe_hw struct in order to set up EEPROM access. 
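 *
 * Illustrative example (assuming IXGBE_EEPROM_WORD_SIZE_SHIFT is 6): an
 * EEC.EE_SIZE field of 2 would give word_size = BIT(2 + 6) = 256 words,
 * i.e. a 512 byte SPI EEPROM.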
 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed */
		eeprom->word_page_size = 0;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here. This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			eeprom->word_size = BIT(eeprom_size +
						IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n",
		       eeprom->type, eeprom->word_size, eeprom->address_bits);
	}

	return 0;
}

/**
 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to write
 * @words: number of words
 * @data: 16 bit word(s) to write to EEPROM
 *
 * Writes 16 bit word(s) to the EEPROM through the bit-bang method
 **/
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					       u16 words, u16 *data)
{
	s32 status;
	u16 i, count;

	hw->eeprom.ops.init_params(hw);

	if (words == 0)
		return IXGBE_ERR_INVALID_ARGUMENT;

	if (offset + words > hw->eeprom.word_size)
		return IXGBE_ERR_EEPROM;

	/*
	 * The EEPROM page size cannot be queried from the chip. We do lazy
	 * initialization. It is worth doing when we write a large buffer.
	 */
	if ((hw->eeprom.word_page_size == 0) &&
	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
		ixgbe_detect_eeprom_page_size_generic(hw, offset);

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to write in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
							    count, &data[i]);

		if (status != 0)
			break;
	}

	return status;
}

/**
 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @words: number of word(s)
 * @data: 16 bit word(s) to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
**/ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status; u16 word; u16 page_size; u16 i; u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; /* Prepare the EEPROM for writing */ status = ixgbe_acquire_eeprom(hw); if (status) return status; if (ixgbe_ready_eeprom(hw) != 0) { ixgbe_release_eeprom(hw); return IXGBE_ERR_EEPROM; } for (i = 0; i < words; i++) { ixgbe_standby_eeprom(hw); /* Send the WRITE ENABLE command (8 bit opcode) */ ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI, IXGBE_EEPROM_OPCODE_BITS); ixgbe_standby_eeprom(hw); /* Some SPI eeproms use the 8th address bit embedded * in the opcode */ if ((hw->eeprom.address_bits == 8) && ((offset + i) >= 128)) write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; /* Send the Write command (8-bit opcode + addr) */ ixgbe_shift_out_eeprom_bits(hw, write_opcode, IXGBE_EEPROM_OPCODE_BITS); ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), hw->eeprom.address_bits); page_size = hw->eeprom.word_page_size; /* Send the data in burst via SPI */ do { word = data[i]; word = (word >> 8) | (word << 8); ixgbe_shift_out_eeprom_bits(hw, word, 16); if (page_size == 0) break; /* do not wrap around page */ if (((offset + i) & (page_size - 1)) == (page_size - 1)) break; } while (++i < words); ixgbe_standby_eeprom(hw); usleep_range(10000, 20000); } /* Done with writing - release the EEPROM */ ixgbe_release_eeprom(hw); return 0; } /** * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM * @hw: pointer to hardware structure * @offset: offset within the EEPROM to be written to * @data: 16 bit word to be written to the EEPROM * * If ixgbe_eeprom_update_checksum is not called after this function, the * EEPROM will most likely contain an invalid checksum. **/ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) { hw->eeprom.ops.init_params(hw); if (offset >= hw->eeprom.word_size) return IXGBE_ERR_EEPROM; return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); } /** * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang * @hw: pointer to hardware structure * @offset: offset within the EEPROM to be read * @words: number of word(s) * @data: read 16 bit words(s) from EEPROM * * Reads 16 bit word(s) from EEPROM through bit-bang method **/ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status; u16 i, count; hw->eeprom.ops.init_params(hw); if (words == 0) return IXGBE_ERR_INVALID_ARGUMENT; if (offset + words > hw->eeprom.word_size) return IXGBE_ERR_EEPROM; /* * We cannot hold synchronization semaphores for too long * to avoid other entity starvation. However it is more efficient * to read in bursts than synchronizing access for each word. */ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 
IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, count, &data[i]); if (status) return status; } return 0; } /** * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang * @hw: pointer to hardware structure * @offset: offset within the EEPROM to be read * @words: number of word(s) * @data: read 16 bit word(s) from EEPROM * * Reads 16 bit word(s) from EEPROM through bit-bang method **/ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status; u16 word_in; u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; u16 i; /* Prepare the EEPROM for reading */ status = ixgbe_acquire_eeprom(hw); if (status) return status; if (ixgbe_ready_eeprom(hw) != 0) { ixgbe_release_eeprom(hw); return IXGBE_ERR_EEPROM; } for (i = 0; i < words; i++) { ixgbe_standby_eeprom(hw); /* Some SPI eeproms use the 8th address bit embedded * in the opcode */ if ((hw->eeprom.address_bits == 8) && ((offset + i) >= 128)) read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; /* Send the READ command (opcode + addr) */ ixgbe_shift_out_eeprom_bits(hw, read_opcode, IXGBE_EEPROM_OPCODE_BITS); ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), hw->eeprom.address_bits); /* Read the data. */ word_in = ixgbe_shift_in_eeprom_bits(hw, 16); data[i] = (word_in >> 8) | (word_in << 8); } /* End this read operation */ ixgbe_release_eeprom(hw); return 0; } /** * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang * @hw: pointer to hardware structure * @offset: offset within the EEPROM to be read * @data: read 16 bit value from EEPROM * * Reads 16 bit value from EEPROM through bit-bang method **/ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) { hw->eeprom.ops.init_params(hw); if (offset >= hw->eeprom.word_size) return IXGBE_ERR_EEPROM; return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); } /** * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @words: number of word(s) * @data: 16 bit word(s) from the EEPROM * * Reads a 16 bit word(s) from the EEPROM using the EERD register. **/ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { u32 eerd; s32 status; u32 i; hw->eeprom.ops.init_params(hw); if (words == 0) return IXGBE_ERR_INVALID_ARGUMENT; if (offset >= hw->eeprom.word_size) return IXGBE_ERR_EEPROM; for (i = 0; i < words; i++) { eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | IXGBE_EEPROM_RW_REG_START; IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); if (status == 0) { data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> IXGBE_EEPROM_RW_REG_DATA); } else { hw_dbg(hw, "Eeprom read timed out\n"); return status; } } return 0; } /** * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size * @hw: pointer to hardware structure * @offset: offset within the EEPROM to be used as a scratch pad * * Discover EEPROM page size by writing marching data at given offset. * This function is called only when we are writing a new large buffer * at given offset so the data would be overwritten anyway. 
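 *
 * Illustrative example (assuming IXGBE_EEPROM_PAGE_SIZE_MAX is 128 and a
 * page-aligned @offset): the marching values 0..127 are written as a single
 * burst; on a part with a 32 word page the address wraps within the page,
 * so the word left at @offset after the burst is 96 and the detected page
 * size is 128 - 96 = 32 words.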
**/ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, u16 offset) { u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; s32 status; u16 i; for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) data[i] = i; hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX; status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, IXGBE_EEPROM_PAGE_SIZE_MAX, data); hw->eeprom.word_page_size = 0; if (status) return status; status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); if (status) return status; /* * When writing in burst more than the actual page size * EEPROM address wraps around current page. */ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; hw_dbg(hw, "Detected EEPROM page size = %d words.\n", hw->eeprom.word_page_size); return 0; } /** * ixgbe_read_eerd_generic - Read EEPROM word using EERD * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM using the EERD register. **/ s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) { return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data); } /** * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to write * @words: number of words * @data: word(s) write to the EEPROM * * Write a 16 bit word(s) to the EEPROM using the EEWR register. **/ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { u32 eewr; s32 status; u16 i; hw->eeprom.ops.init_params(hw); if (words == 0) return IXGBE_ERR_INVALID_ARGUMENT; if (offset >= hw->eeprom.word_size) return IXGBE_ERR_EEPROM; for (i = 0; i < words; i++) { eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | (data[i] << IXGBE_EEPROM_RW_REG_DATA) | IXGBE_EEPROM_RW_REG_START; status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); if (status) { hw_dbg(hw, "Eeprom write EEWR timed out\n"); return status; } IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); if (status) { hw_dbg(hw, "Eeprom write EEWR timed out\n"); return status; } } return 0; } /** * ixgbe_write_eewr_generic - Write EEPROM word using EEWR * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to write * @data: word write to the EEPROM * * Write a 16 bit word to the EEPROM using the EEWR register. **/ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) { return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); } /** * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status * @hw: pointer to hardware structure * @ee_reg: EEPROM flag for polling * * Polls the status bit (bit 1) of the EERD or EEWR to determine when the * read or write is done respectively. **/ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) { u32 i; u32 reg; for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { if (ee_reg == IXGBE_NVM_POLL_READ) reg = IXGBE_READ_REG(hw, IXGBE_EERD); else reg = IXGBE_READ_REG(hw, IXGBE_EEWR); if (reg & IXGBE_EEPROM_RW_REG_DONE) { return 0; } udelay(5); } return IXGBE_ERR_EEPROM; } /** * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang * @hw: pointer to hardware structure * * Prepares EEPROM for access using bit-bang method. This function should * be called before issuing a command to the EEPROM. 
**/ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) { u32 eec; u32 i; if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) return IXGBE_ERR_SWFW_SYNC; eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); /* Request EEPROM Access */ eec |= IXGBE_EEC_REQ; IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (eec & IXGBE_EEC_GNT) break; udelay(5); } /* Release if grant not acquired */ if (!(eec & IXGBE_EEC_GNT)) { eec &= ~IXGBE_EEC_REQ; IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); hw_dbg(hw, "Could not acquire EEPROM grant\n"); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return IXGBE_ERR_EEPROM; } /* Setup EEPROM for Read/Write */ /* Clear CS and SK */ eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); udelay(1); return 0; } /** * ixgbe_get_eeprom_semaphore - Get hardware semaphore * @hw: pointer to hardware structure * * Sets the hardware semaphores so EEPROM access can occur for bit-bang method **/ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) { u32 timeout = 2000; u32 i; u32 swsm; /* Get SMBI software semaphore between device drivers first */ for (i = 0; i < timeout; i++) { /* * If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); if (!(swsm & IXGBE_SWSM_SMBI)) break; usleep_range(50, 100); } if (i == timeout) { hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n"); /* this release is particularly important because our attempts * above to get the semaphore may have succeeded, and if there * was a timeout, we should unconditionally clear the semaphore * bits to free the driver to make progress */ ixgbe_release_eeprom_semaphore(hw); usleep_range(50, 100); /* one last try * If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); if (swsm & IXGBE_SWSM_SMBI) { hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n"); return IXGBE_ERR_EEPROM; } } /* Now get the semaphore between SW/FW through the SWESMBI bit */ for (i = 0; i < timeout; i++) { swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); /* Set the SW EEPROM semaphore bit to request access */ swsm |= IXGBE_SWSM_SWESMBI; IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); /* If we set the bit successfully then we got the * semaphore. */ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); if (swsm & IXGBE_SWSM_SWESMBI) break; usleep_range(50, 100); } /* Release semaphores and return error if SW EEPROM semaphore * was not granted because we don't have access to the EEPROM */ if (i >= timeout) { hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n"); ixgbe_release_eeprom_semaphore(hw); return IXGBE_ERR_EEPROM; } return 0; } /** * ixgbe_release_eeprom_semaphore - Release hardware semaphore * @hw: pointer to hardware structure * * This function clears hardware semaphore bits. 
**/ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) { u32 swsm; swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_ready_eeprom - Polls for EEPROM ready * @hw: pointer to hardware structure **/ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) { u16 i; u8 spi_stat_reg; /* * Read "Status Register" repeatedly until the LSB is cleared. The * EEPROM will signal that the command has been completed by clearing * bit 0 of the internal status register. If it's not cleared within * 5 milliseconds, then error out. */ for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, IXGBE_EEPROM_OPCODE_BITS); spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) break; udelay(5); ixgbe_standby_eeprom(hw); } /* * On some parts, SPI write time could vary from 0-20mSec on 3.3V * devices (and only 0-5mSec on 5V devices) */ if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { hw_dbg(hw, "SPI EEPROM Status error\n"); return IXGBE_ERR_EEPROM; } return 0; } /** * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state * @hw: pointer to hardware structure **/ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) { u32 eec; eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); /* Toggle CS to flush commands */ eec |= IXGBE_EEC_CS; IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); udelay(1); eec &= ~IXGBE_EEC_CS; IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); udelay(1); } /** * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. * @hw: pointer to hardware structure * @data: data to send to the EEPROM * @count: number of bits to shift out **/ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, u16 count) { u32 eec; u32 mask; u32 i; eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); /* * Mask is used to shift "count" bits of "data" out to the EEPROM * one bit at a time. Determine the starting bit based on count */ mask = BIT(count - 1); for (i = 0; i < count; i++) { /* * A "1" is shifted out to the EEPROM by setting bit "DI" to a * "1", and then raising and then lowering the clock (the SK * bit controls the clock input to the EEPROM). A "0" is * shifted out to the EEPROM by setting "DI" to "0" and then * raising and then lowering the clock. */ if (data & mask) eec |= IXGBE_EEC_DI; else eec &= ~IXGBE_EEC_DI; IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); udelay(1); ixgbe_raise_eeprom_clk(hw, &eec); ixgbe_lower_eeprom_clk(hw, &eec); /* * Shift mask to signify next bit of data to shift in to the * EEPROM */ mask = mask >> 1; } /* We leave the "DI" bit set to "0" when we leave this routine. */ eec &= ~IXGBE_EEC_DI; IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM * @hw: pointer to hardware structure * @count: number of bits to shift **/ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) { u32 eec; u32 i; u16 data = 0; /* * In order to read a register from the EEPROM, we need to shift * 'count' bits in from the EEPROM. Bits are "shifted in" by raising * the clock input to the EEPROM (setting the SK bit), and then reading * the value of the "DO" bit. During this "shifting in" process the * "DI" bit should always be clear. 
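	 *
	 * For example, ixgbe_ready_eeprom() calls this routine with a count
	 * of 8 to shift in the SPI status register; the first bit clocked in
	 * ends up as the most significant bit of the returned 8-bit value.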
*/ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); for (i = 0; i < count; i++) { data = data << 1; ixgbe_raise_eeprom_clk(hw, &eec); eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eec &= ~(IXGBE_EEC_DI); if (eec & IXGBE_EEC_DO) data |= 1; ixgbe_lower_eeprom_clk(hw, &eec); } return data; } /** * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. * @hw: pointer to hardware structure * @eec: EEC register's current value **/ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) { /* * Raise the clock input to the EEPROM * (setting the SK bit), then delay */ *eec = *eec | IXGBE_EEC_SK; IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec); IXGBE_WRITE_FLUSH(hw); udelay(1); } /** * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. * @hw: pointer to hardware structure * @eec: EEC's current value **/ static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) { /* * Lower the clock input to the EEPROM (clearing the SK bit), then * delay */ *eec = *eec & ~IXGBE_EEC_SK; IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec); IXGBE_WRITE_FLUSH(hw); udelay(1); } /** * ixgbe_release_eeprom - Release EEPROM, release semaphores * @hw: pointer to hardware structure **/ static void ixgbe_release_eeprom(struct ixgbe_hw *hw) { u32 eec; eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eec |= IXGBE_EEC_CS; /* Pull CS high */ eec &= ~IXGBE_EEC_SK; /* Lower SCK */ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); udelay(1); /* Stop requesting EEPROM access */ eec &= ~IXGBE_EEC_REQ; IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); /* * Delay before attempt to obtain semaphore again to allow FW * access. semaphore_delay is in ms we need us for usleep_range */ usleep_range(hw->eeprom.semaphore_delay * 1000, hw->eeprom.semaphore_delay * 2000); } /** * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum * @hw: pointer to hardware structure **/ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) { u16 i; u16 j; u16 checksum = 0; u16 length = 0; u16 pointer = 0; u16 word = 0; /* Include 0x0-0x3F in the checksum */ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { if (hw->eeprom.ops.read(hw, i, &word)) { hw_dbg(hw, "EEPROM read failed\n"); break; } checksum += word; } /* Include all data from pointers except for the fw pointer */ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { if (hw->eeprom.ops.read(hw, i, &pointer)) { hw_dbg(hw, "EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } /* If the pointer seems invalid */ if (pointer == 0xFFFF || pointer == 0) continue; if (hw->eeprom.ops.read(hw, pointer, &length)) { hw_dbg(hw, "EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } if (length == 0xFFFF || length == 0) continue; for (j = pointer + 1; j <= pointer + length; j++) { if (hw->eeprom.ops.read(hw, j, &word)) { hw_dbg(hw, "EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } checksum += word; } } checksum = (u16)IXGBE_EEPROM_SUM - checksum; return (s32)checksum; } /** * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum * @hw: pointer to hardware structure * @checksum_val: calculated checksum * * Performs checksum calculation and validates the EEPROM checksum. If the * caller does not need checksum_val, the value can be NULL. **/ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, u16 *checksum_val) { s32 status; u16 checksum; u16 read_checksum = 0; /* * Read the first word from the EEPROM. 
If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); if (status) { hw_dbg(hw, "EEPROM read failed\n"); return status; } status = hw->eeprom.ops.calc_checksum(hw); if (status < 0) return status; checksum = (u16)(status & 0xffff); status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); if (status) { hw_dbg(hw, "EEPROM read failed\n"); return status; } /* Verify read checksum from EEPROM is the same as * calculated checksum */ if (read_checksum != checksum) status = IXGBE_ERR_EEPROM_CHECKSUM; /* If the user cares, return the calculated checksum */ if (checksum_val) *checksum_val = checksum; return status; } /** * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum * @hw: pointer to hardware structure **/ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) { s32 status; u16 checksum; /* * Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); if (status) { hw_dbg(hw, "EEPROM read failed\n"); return status; } status = hw->eeprom.ops.calc_checksum(hw); if (status < 0) return status; checksum = (u16)(status & 0xffff); status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum); return status; } /** * ixgbe_set_rar_generic - Set Rx address register * @hw: pointer to hardware structure * @index: Receive address register to write * @addr: Address to put into receive address register * @vmdq: VMDq "set" or "pool" index * @enable_addr: set flag that address is active * * Puts an ethernet address into a receive address register. **/ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 enable_addr) { u32 rar_low, rar_high; u32 rar_entries = hw->mac.num_rar_entries; /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { hw_dbg(hw, "RAR index %d is out of range.\n", index); return IXGBE_ERR_INVALID_ARGUMENT; } /* setup VMDq pool selection before this RAR gets enabled */ hw->mac.ops.set_vmdq(hw, index, vmdq); /* * HW expects these in little endian so we reverse the byte * order from network order (big endian) to little endian */ rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); /* * Some parts put the VMDq setting in the extra RAH bits, * so save everything except the lower 16 bits that hold part * of the address and the address valid bit. */ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); if (enable_addr != 0) rar_high |= IXGBE_RAH_AV; /* Record lower 32 bits of MAC address and then make * sure that write is flushed to hardware before writing * the upper 16 bits and setting the valid bit. */ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); return 0; } /** * ixgbe_clear_rar_generic - Remove Rx address register * @hw: pointer to hardware structure * @index: Receive address register to write * * Clears an ethernet address from a receive address register. 
**/ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) { u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { hw_dbg(hw, "RAR index %d is out of range.\n", index); return IXGBE_ERR_INVALID_ARGUMENT; } /* * Some parts put the VMDq setting in the extra RAH bits, * so save everything except the lower 16 bits that hold part * of the address and the address valid bit. */ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); /* Clear the address valid bit and upper 16 bits of the address * before clearing the lower bits. This way we aren't updating * a live filter. */ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); /* clear VMDq pool/queue selection for this RAR */ hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); return 0; } /** * ixgbe_init_rx_addrs_generic - Initializes receive address filters. * @hw: pointer to hardware structure * * Places the MAC address in receive address register 0 and clears the rest * of the receive address registers. Clears the multicast table. Assumes * the receiver is in reset when the routine is called. **/ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) { u32 i; u32 rar_entries = hw->mac.num_rar_entries; /* * If the current mac address is valid, assume it is a software override * to the permanent address. * Otherwise, use the permanent address from the eeprom. */ if (!is_valid_ether_addr(hw->mac.addr)) { /* Get the MAC address from the RAR0 for later reference */ hw->mac.ops.get_mac_addr(hw, hw->mac.addr); hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr); } else { /* Setup the receive address. */ hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); } /* clear VMDq pool/queue selection for RAR 0 */ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); hw->addr_ctrl.overflow_promisc = 0; hw->addr_ctrl.rar_used_count = 1; /* Zero out the other receive addresses. */ hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1); for (i = 1; i < rar_entries; i++) { IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); } /* Clear the MTA */ hw->addr_ctrl.mta_in_use = 0; IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); hw_dbg(hw, " Clearing MTA\n"); for (i = 0; i < hw->mac.mcft_size; i++) IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); if (hw->mac.ops.init_uta_tables) hw->mac.ops.init_uta_tables(hw); return 0; } /** * ixgbe_mta_vector - Determines bit-vector in multicast table to set * @hw: pointer to hardware structure * @mc_addr: the multicast address * * Extracts the 12 bits, from a multicast address, to determine which * bit-vector to set in the multicast table. The hardware uses 12 bits, from * incoming rx multicast addresses, to determine the bit-vector to check in * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set * by the MO field of the MCSTCTRL. The MO field is set during initialization * to mc_filter_type. 
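 *
 * Worked example (illustrative): with mc_filter_type 0, the multicast
 * address 01:00:5E:00:00:01 gives vector = (0x00 >> 4) | (0x01 << 4) =
 * 0x010, which ixgbe_set_mta() maps to MTA register 0, bit 16.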
**/ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) { u32 vector = 0; switch (hw->mac.mc_filter_type) { case 0: /* use bits [47:36] of the address */ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); break; case 1: /* use bits [46:35] of the address */ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); break; case 2: /* use bits [45:34] of the address */ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); break; case 3: /* use bits [43:32] of the address */ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); break; default: /* Invalid mc_filter_type */ hw_dbg(hw, "MC filter type param set incorrectly\n"); break; } /* vector can only be 12-bits or boundary will be exceeded */ vector &= 0xFFF; return vector; } /** * ixgbe_set_mta - Set bit-vector in multicast table * @hw: pointer to hardware structure * @mc_addr: Multicast address * * Sets the bit-vector in the multicast table. **/ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) { u32 vector; u32 vector_bit; u32 vector_reg; hw->addr_ctrl.mta_in_use++; vector = ixgbe_mta_vector(hw, mc_addr); hw_dbg(hw, " bit-vector = 0x%03X\n", vector); /* * The MTA is a register array of 128 32-bit registers. It is treated * like an array of 4096 bits. We want to set bit * BitArray[vector_value]. So we figure out what register the bit is * in, read it, OR in the new bit, then write back the new value. The * register is determined by the upper 7 bits of the vector value and * the bit within that register are determined by the lower 5 bits of * the value. */ vector_reg = (vector >> 5) & 0x7F; vector_bit = vector & 0x1F; hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit); } /** * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses * @hw: pointer to hardware structure * @netdev: pointer to net device structure * * The given list replaces any existing list. Clears the MC addrs from receive * address registers and the multicast table. Uses unused receive address * registers for the first multicast addresses, and hashes the rest into the * multicast table. **/ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, struct net_device *netdev) { struct netdev_hw_addr *ha; u32 i; /* * Set the new number of MC addresses that we are being requested to * use. */ hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); hw->addr_ctrl.mta_in_use = 0; /* Clear mta_shadow */ hw_dbg(hw, " Clearing MTA\n"); memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); /* Update mta shadow */ netdev_for_each_mc_addr(ha, netdev) { hw_dbg(hw, " Adding the multicast addresses:\n"); ixgbe_set_mta(hw, ha->addr); } /* Enable mta */ for (i = 0; i < hw->mac.mcft_size; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, hw->mac.mta_shadow[i]); if (hw->addr_ctrl.mta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); return 0; } /** * ixgbe_enable_mc_generic - Enable multicast address in RAR * @hw: pointer to hardware structure * * Enables multicast address in RAR and the use of the multicast hash table. **/ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) { struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; if (a->mta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); return 0; } /** * ixgbe_disable_mc_generic - Disable multicast address in RAR * @hw: pointer to hardware structure * * Disables multicast address in RAR and the use of the multicast hash table. 
**/ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) { struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; if (a->mta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); return 0; } /** * ixgbe_fc_enable_generic - Enable flow control * @hw: pointer to hardware structure * * Enable flow control according to the current settings. **/ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) { u32 mflcn_reg, fccfg_reg; u32 reg; u32 fcrtl, fcrth; int i; /* Validate the water mark configuration. */ if (!hw->fc.pause_time) return IXGBE_ERR_INVALID_LINK_SETTINGS; /* Low water mark of zero causes XOFF floods */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && hw->fc.high_water[i]) { if (!hw->fc.low_water[i] || hw->fc.low_water[i] >= hw->fc.high_water[i]) { hw_dbg(hw, "Invalid water mark configuration\n"); return IXGBE_ERR_INVALID_LINK_SETTINGS; } } } /* Negotiate the fc mode to use */ hw->mac.ops.fc_autoneg(hw); /* Disable any previous flow control settings */ mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); /* * The possible values of fc.current_mode are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames, * but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames but * we do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. * other: Invalid. */ switch (hw->fc.current_mode) { case ixgbe_fc_none: /* * Flow control is disabled by software override or autoneg. * The code below will actually disable it in the HW. */ break; case ixgbe_fc_rx_pause: /* * Rx Flow control is enabled and Tx Flow control is * disabled by software override. Since there really * isn't a way to advertise that we are capable of RX * Pause ONLY, we will advertise that we support both * symmetric and asymmetric Rx PAUSE. Later, we will * disable the adapter's ability to send PAUSE frames. */ mflcn_reg |= IXGBE_MFLCN_RFCE; break; case ixgbe_fc_tx_pause: /* * Tx Flow control is enabled, and Rx Flow control is * disabled by software override. */ fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; break; case ixgbe_fc_full: /* Flow control (both Rx and Tx) is enabled by SW override. */ mflcn_reg |= IXGBE_MFLCN_RFCE; fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; break; default: hw_dbg(hw, "Flow control param set incorrectly\n"); return IXGBE_ERR_CONFIG; } /* Set 802.3x based flow control settings. */ mflcn_reg |= IXGBE_MFLCN_DPF; IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); /* Set up and enable Rx high/low water mark thresholds, enable XON. */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && hw->fc.high_water[i]) { fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; } else { IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); /* * In order to prevent Tx hangs when the internal Tx * switch is enabled we must set the high water mark * to the Rx packet buffer size - 24KB. This allows * the Tx switch to function even under heavy Rx * workloads. 
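			 *
			 * For illustration, with a 512 KB Rx packet buffer
			 * (RXPBSIZE reads 0x80000) this would give
			 * fcrth = 0x80000 - 24576 = 0x7A000.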
*/ fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; } IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); } /* Configure pause time (2 TCs per register) */ reg = hw->fc.pause_time * 0x00010001U; for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); return 0; } /** * ixgbe_negotiate_fc - Negotiate flow control * @hw: pointer to hardware structure * @adv_reg: flow control advertised settings * @lp_reg: link partner's flow control settings * @adv_sym: symmetric pause bit in advertisement * @adv_asm: asymmetric pause bit in advertisement * @lp_sym: symmetric pause bit in link partner advertisement * @lp_asm: asymmetric pause bit in link partner advertisement * * Find the intersection between advertised settings and link partner's * advertised settings **/ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) { if ((!(adv_reg)) || (!(lp_reg))) return IXGBE_ERR_FC_NOT_NEGOTIATED; if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { /* * Now we need to check if the user selected Rx ONLY * of pause frames. In this case, we had to advertise * FULL flow control because we could not advertise RX * ONLY. Hence, we must now check to see if we need to * turn OFF the TRANSMISSION of PAUSE frames. */ if (hw->fc.requested_mode == ixgbe_fc_full) { hw->fc.current_mode = ixgbe_fc_full; hw_dbg(hw, "Flow Control = FULL.\n"); } else { hw->fc.current_mode = ixgbe_fc_rx_pause; hw_dbg(hw, "Flow Control=RX PAUSE frames only\n"); } } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && (lp_reg & lp_sym) && (lp_reg & lp_asm)) { hw->fc.current_mode = ixgbe_fc_tx_pause; hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { hw->fc.current_mode = ixgbe_fc_rx_pause; hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); } else { hw->fc.current_mode = ixgbe_fc_none; hw_dbg(hw, "Flow Control = NONE.\n"); } return 0; } /** * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber * @hw: pointer to hardware structure * * Enable flow control according on 1 gig fiber. **/ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) { u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; s32 ret_val; /* * On multispeed fiber at 1g, bail out if * - link is up but AN did not complete, or if * - link is up and AN completed but timed out */ linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) return IXGBE_ERR_FC_NOT_NEGOTIATED; pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, IXGBE_PCS1GANA_ASM_PAUSE, IXGBE_PCS1GANA_SYM_PAUSE, IXGBE_PCS1GANA_ASM_PAUSE); return ret_val; } /** * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 * @hw: pointer to hardware structure * * Enable flow control according to IEEE clause 37. 
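 *
 * For example (illustrative), if the local device advertises both symmetric
 * and asymmetric PAUSE while the link partner advertises asymmetric only,
 * ixgbe_negotiate_fc() resolves the mode to Rx PAUSE frames only.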
**/ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) { u32 links2, anlp1_reg, autoc_reg, links; s32 ret_val; /* * On backplane, bail out if * - backplane autoneg was not completed, or if * - we are 82599 and link partner is not AN enabled */ links = IXGBE_READ_REG(hw, IXGBE_LINKS); if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) return IXGBE_ERR_FC_NOT_NEGOTIATED; if (hw->mac.type == ixgbe_mac_82599EB) { links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) return IXGBE_ERR_FC_NOT_NEGOTIATED; } /* * Read the 10g AN autoc and LP ability registers and resolve * local flow control settings accordingly */ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); ret_val = ixgbe_negotiate_fc(hw, autoc_reg, anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); return ret_val; } /** * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 * @hw: pointer to hardware structure * * Enable flow control according to IEEE clause 37. **/ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) { u16 technology_ability_reg = 0; u16 lp_technology_ability_reg = 0; hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &technology_ability_reg); hw->phy.ops.read_reg(hw, MDIO_AN_LPA, MDIO_MMD_AN, &lp_technology_ability_reg); return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, (u32)lp_technology_ability_reg, IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); } /** * ixgbe_fc_autoneg - Configure flow control * @hw: pointer to hardware structure * * Compares our advertised flow control capabilities to those advertised by * our link partner, and determines the proper flow control mode to use. **/ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; ixgbe_link_speed speed; bool link_up; /* * AN should have completed when the cable was plugged in. * Look for reasons to bail out. Bail out if: * - FC autoneg is disabled, or if * - link is not up. * * Since we're being called from an LSC, link is already known to be up. * So use link_up_wait_to_complete=false. */ if (hw->fc.disable_fc_autoneg) goto out; hw->mac.ops.check_link(hw, &speed, &link_up, false); if (!link_up) goto out; switch (hw->phy.media_type) { /* Autoneg flow control on fiber adapters */ case ixgbe_media_type_fiber: if (speed == IXGBE_LINK_SPEED_1GB_FULL) ret_val = ixgbe_fc_autoneg_fiber(hw); break; /* Autoneg flow control on backplane adapters */ case ixgbe_media_type_backplane: ret_val = ixgbe_fc_autoneg_backplane(hw); break; /* Autoneg flow control on copper adapters */ case ixgbe_media_type_copper: if (ixgbe_device_supports_autoneg_fc(hw)) ret_val = ixgbe_fc_autoneg_copper(hw); break; default: break; } out: if (ret_val == 0) { hw->fc.fc_was_autonegged = true; } else { hw->fc.fc_was_autonegged = false; hw->fc.current_mode = hw->fc.requested_mode; } } /** * ixgbe_pcie_timeout_poll - Return number of times to poll for completion * @hw: pointer to hardware structure * * System-wide timeout range is encoded in PCIe Device Control2 register. * * Add 10% to specified maximum and return the number of times to poll for * completion timeout, in units of 100 microsec. Never return less than * 800 = 80 millisec. 
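 *
 * For example, the 4-8 sec range uses the 8 sec maximum: 80000 intervals of
 * 100 microsec, plus 10%, gives 88000 polls.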
**/ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) { s16 devctl2; u32 pollcnt; devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; switch (devctl2) { case IXGBE_PCIDEVCTRL2_65_130ms: pollcnt = 1300; /* 130 millisec */ break; case IXGBE_PCIDEVCTRL2_260_520ms: pollcnt = 5200; /* 520 millisec */ break; case IXGBE_PCIDEVCTRL2_1_2s: pollcnt = 20000; /* 2 sec */ break; case IXGBE_PCIDEVCTRL2_4_8s: pollcnt = 80000; /* 8 sec */ break; case IXGBE_PCIDEVCTRL2_17_34s: pollcnt = 34000; /* 34 sec */ break; case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ default: pollcnt = 800; /* 80 millisec minimum */ break; } /* add 10% to spec maximum */ return (pollcnt * 11) / 10; } /** * ixgbe_disable_pcie_primary - Disable PCI-express primary access * @hw: pointer to hardware structure * * Disables PCI-Express primary access and verifies there are no pending * requests. IXGBE_ERR_PRIMARY_REQUESTS_PENDING is returned if primary disable * bit hasn't caused the primary requests to be disabled, else 0 * is returned signifying primary requests disabled. **/ static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw) { u32 i, poll; u16 value; /* Always set this bit to ensure any future transactions are blocked */ IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); /* Poll for bit to read as set */ for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) { if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS) break; usleep_range(100, 120); } if (i >= IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT) { hw_dbg(hw, "GIO disable did not set - requesting resets\n"); goto gio_disable_fail; } /* Exit if primary requests are blocked */ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || ixgbe_removed(hw->hw_addr)) return 0; /* Poll for primary request bit to clear */ for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) { udelay(100); if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) return 0; } /* * Two consecutive resets are required via CTRL.RST per datasheet * 5.2.5.3.2 Primary Disable. We set a flag to inform the reset routine * of this need. The first reset prevents new primary requests from * being issued by our device. We then must wait 1usec or more for any * remaining completions from the PCIe bus to trickle in, and then reset * again to clear out any effects they may have had on our device. */ hw_dbg(hw, "GIO Primary Disable bit didn't clear - requesting resets\n"); gio_disable_fail: hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; if (hw->mac.type >= ixgbe_mac_X550) return 0; /* * Before proceeding, make sure that the PCIe block does not have * transactions pending. 
 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		udelay(100);
		value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
		if (ixgbe_removed(hw->hw_addr))
			return 0;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			return 0;
	}

	hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
	return IXGBE_ERR_PRIMARY_REQUESTS_PENDING;
}

/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr = 0;
	u32 swmask = mask;
	u32 fwmask = mask << 5;
	u32 timeout = 200;
	u32 i;

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return 0;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			usleep_range(5000, 10000);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	usleep_range(5000, 10000);
	return IXGBE_ERR_SWFW_SYNC;
}

/**
 * ixgbe_release_swfw_sync - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr;
	u32 swmask = mask;

	ixgbe_get_eeprom_semaphore(hw);

	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
	gssr &= ~swmask;
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

	ixgbe_release_eeprom_semaphore(hw);
}

/**
 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
 * @hw: pointer to hardware structure
 * @reg_val: Value we read from AUTOC
 * @locked: bool to indicate whether the SW/FW lock should be taken. Never
 *	    true in the generic case.
 *
 * The default case requires no protection, so just do the register read.
 **/
s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
	*locked = false;
	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	return 0;
}

/**
 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
 * @hw: pointer to hardware structure
 * @reg_val: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by a
 *	    previous read.
 **/
s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
{
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
	return 0;
}

/**
 * ixgbe_disable_rx_buff_generic - Stops the receive data path
 * @hw: pointer to hardware structure
 *
 * Stops the receive data path and waits for the HW to internally
 * empty the Rx security block.
**/ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) { #define IXGBE_MAX_SECRX_POLL 40 int i; int secrxreg; secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); secrxreg |= IXGBE_SECRXCTRL_RX_DIS; IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) break; else /* Use interrupt-safe sleep just in case */ udelay(1000); } /* For informational purposes only */ if (i >= IXGBE_MAX_SECRX_POLL) hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n"); return 0; } /** * ixgbe_enable_rx_buff_generic - Enables the receive data path * @hw: pointer to hardware structure * * Enables the receive data path **/ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) { u32 secrxreg; secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); IXGBE_WRITE_FLUSH(hw); return 0; } /** * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit * @hw: pointer to hardware structure * @regval: register value to write to RXCTRL * * Enables the Rx DMA unit **/ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) { if (regval & IXGBE_RXCTRL_RXEN) hw->mac.ops.enable_rx(hw); else hw->mac.ops.disable_rx(hw); return 0; } /** * ixgbe_blink_led_start_generic - Blink LED based on index. * @hw: pointer to hardware structure * @index: led number to blink **/ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) { ixgbe_link_speed speed = 0; bool link_up = false; u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); bool locked = false; s32 ret_val; if (index > 3) return IXGBE_ERR_PARAM; /* * Link must be up to auto-blink the LEDs; * Force it if link is down. */ hw->mac.ops.check_link(hw, &speed, &link_up, false); if (!link_up) { ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); if (ret_val) return ret_val; autoc_reg |= IXGBE_AUTOC_AN_RESTART; autoc_reg |= IXGBE_AUTOC_FLU; ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); if (ret_val) return ret_val; IXGBE_WRITE_FLUSH(hw); usleep_range(10000, 20000); } led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg |= IXGBE_LED_BLINK(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); return 0; } /** * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. * @hw: pointer to hardware structure * @index: led number to stop blinking **/ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) { u32 autoc_reg = 0; u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); bool locked = false; s32 ret_val; if (index > 3) return IXGBE_ERR_PARAM; ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); if (ret_val) return ret_val; autoc_reg &= ~IXGBE_AUTOC_FLU; autoc_reg |= IXGBE_AUTOC_AN_RESTART; ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); if (ret_val) return ret_val; led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg &= ~IXGBE_LED_BLINK(index); led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); return 0; } /** * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM * @hw: pointer to hardware structure * @san_mac_offset: SAN MAC address offset * * This function will read the EEPROM location for the SAN MAC address * pointer, and returns the value at that location. This is used in both * get and set mac_addr routines. 
**/ static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, u16 *san_mac_offset) { s32 ret_val; /* * First read the EEPROM pointer to see if the MAC addresses are * available. */ ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset); if (ret_val) hw_err(hw, "eeprom read at offset %d failed\n", IXGBE_SAN_MAC_ADDR_PTR); return ret_val; } /** * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM * @hw: pointer to hardware structure * @san_mac_addr: SAN MAC address * * Reads the SAN MAC address from the EEPROM, if it's available. This is * per-port, so set_lan_id() must be called before reading the addresses. * set_lan_id() is called by identify_sfp(), but this cannot be relied * upon for non-SFP connections, so we must call it here. **/ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) { u16 san_mac_data, san_mac_offset; u8 i; s32 ret_val; /* * First read the EEPROM pointer to see if the MAC addresses are * available. If they're not, no point in calling set_lan_id() here. */ ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) goto san_mac_addr_clr; /* make sure we know which port we need to program */ hw->mac.ops.set_lan_id(hw); /* apply the port offset to the address offset */ (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); for (i = 0; i < 3; i++) { ret_val = hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); if (ret_val) { hw_err(hw, "eeprom read at offset %d failed\n", san_mac_offset); goto san_mac_addr_clr; } san_mac_addr[i * 2] = (u8)(san_mac_data); san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); san_mac_offset++; } return 0; san_mac_addr_clr: /* No addresses available in this EEPROM. It's not necessarily an * error though, so just wipe the local address and return. */ for (i = 0; i < 6; i++) san_mac_addr[i] = 0xFF; return ret_val; } /** * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count * @hw: pointer to hardware structure * * Read PCIe configuration space, and get the MSI-X vector count from * the capabilities table. 
**/ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) { u16 msix_count; u16 max_msix_count; u16 pcie_offset; switch (hw->mac.type) { case ixgbe_mac_82598EB: pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS; max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598; break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; break; default: return 1; } msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset); if (ixgbe_removed(hw->hw_addr)) msix_count = 0; msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; /* MSI-X count is zero-based in HW */ msix_count++; if (msix_count > max_msix_count) msix_count = max_msix_count; return msix_count; } /** * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address * @hw: pointer to hardware struct * @rar: receive address register index to disassociate * @vmdq: VMDq pool index to remove from the rar **/ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 mpsar_lo, mpsar_hi; u32 rar_entries = hw->mac.num_rar_entries; /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { hw_dbg(hw, "RAR index %d is out of range.\n", rar); return IXGBE_ERR_INVALID_ARGUMENT; } mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); if (ixgbe_removed(hw->hw_addr)) return 0; if (!mpsar_lo && !mpsar_hi) return 0; if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { if (mpsar_lo) { IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); mpsar_lo = 0; } if (mpsar_hi) { IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); mpsar_hi = 0; } } else if (vmdq < 32) { mpsar_lo &= ~BIT(vmdq); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); } else { mpsar_hi &= ~BIT(vmdq - 32); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); } /* was that the last pool using this rar? */ if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0 && rar != hw->mac.san_mac_rar_index) hw->mac.ops.clear_rar(hw, rar); return 0; } /** * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address * @hw: pointer to hardware struct * @rar: receive address register index to associate with a VMDq index * @vmdq: VMDq pool index **/ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 mpsar; u32 rar_entries = hw->mac.num_rar_entries; /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { hw_dbg(hw, "RAR index %d is out of range.\n", rar); return IXGBE_ERR_INVALID_ARGUMENT; } if (vmdq < 32) { mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); mpsar |= BIT(vmdq); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); } else { mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); mpsar |= BIT(vmdq - 32); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); } return 0; } /** * ixgbe_set_vmdq_san_mac_generic - Associate VMDq pool index with a rx address * @hw: pointer to hardware struct * @vmdq: VMDq pool index * * This function should only be involved in the IOV mode. * In IOV mode, Default pool is next pool after the number of * VFs advertized and not 0. 
* MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] **/ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) { u32 rar = hw->mac.san_mac_rar_index; if (vmdq < 32) { IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq)); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); } else { IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32)); } return 0; } /** * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array * @hw: pointer to hardware structure **/ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) { int i; for (i = 0; i < 128; i++) IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); return 0; } /** * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot * @hw: pointer to hardware structure * @vlan: VLAN id to write to VLAN filter * @vlvf_bypass: true to find vlanid only, false returns first empty slot if * vlanid not found * * return the VLVF index where this VLAN id should be placed * **/ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) { s32 regindex, first_empty_slot; u32 bits; /* short cut the special case */ if (vlan == 0) return 0; /* if vlvf_bypass is set we don't want to use an empty slot, we * will simply bypass the VLVF if there are no entries present in the * VLVF that contain our VLAN */ first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0; /* add VLAN enable bit for comparison */ vlan |= IXGBE_VLVF_VIEN; /* Search for the vlan id in the VLVF entries. Save off the first empty * slot found along the way. * * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1 */ for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) { bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); if (bits == vlan) return regindex; if (!first_empty_slot && !bits) first_empty_slot = regindex; } /* If we are here then we didn't find the VLAN. Return first empty * slot we found during our search, else error. */ if (!first_empty_slot) hw_dbg(hw, "No space in VLVF.\n"); return first_empty_slot ? : IXGBE_ERR_NO_SPACE; } /** * ixgbe_set_vfta_generic - Set VLAN filter table * @hw: pointer to hardware structure * @vlan: VLAN id to write to VLAN filter * @vind: VMDq output index that maps queue to VLAN id in VFVFB * @vlan_on: boolean flag to turn on/off VLAN in VFVF * @vlvf_bypass: boolean flag indicating updating default pool is okay * * Turn on/off specified VLAN in the VLAN filter table. **/ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, bool vlvf_bypass) { u32 regidx, vfta_delta, vfta, bits; s32 vlvf_index; if ((vlan > 4095) || (vind > 63)) return IXGBE_ERR_PARAM; /* * this is a 2 part operation - first the VFTA, then the * VLVF and VLVFB if VT Mode is set * We don't write the VFTA until we know the VLVF part succeeded. */ /* Part 1 * The VFTA is a bitstring made up of 128 32-bit registers * that enable the particular VLAN id, much like the MTA: * bits[11-5]: which register * bits[4-0]: which bit in the register */ regidx = vlan / 32; vfta_delta = BIT(vlan % 32); vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); /* vfta_delta represents the difference between the current value * of vfta and the value we want in the register. Since the diff * is an XOR mask we can just update vfta using an XOR. */ vfta_delta &= vlan_on ? 
~vfta : vfta; vfta ^= vfta_delta; /* Part 2 * If VT Mode is set * Either vlan_on * make sure the vlan is in VLVF * set the vind bit in the matching VLVFB * Or !vlan_on * clear the pool bit and possibly the vind */ if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) goto vfta_update; vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); if (vlvf_index < 0) { if (vlvf_bypass) goto vfta_update; return vlvf_index; } bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); /* set the pool bit */ bits |= BIT(vind % 32); if (vlan_on) goto vlvf_update; /* clear the pool bit */ bits ^= BIT(vind % 32); if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { /* Clear VFTA first, then disable VLVF. Otherwise * we run the risk of stray packets leaking into * the PF via the default pool */ if (vfta_delta) IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); /* disable VLVF and clear remaining bit from pool */ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); return 0; } /* If there are still bits set in the VLVFB registers * for the VLAN ID indicated we need to see if the * caller is requesting that we clear the VFTA entry bit. * If the caller has requested that we clear the VFTA * entry bit but there are still pools/VFs using this VLAN * ID entry then ignore the request. We're not worried * about the case where we're turning the VFTA VLAN ID * entry bit on, only when requested to turn it off as * there may be multiple pools and/or VFs using the * VLAN ID entry. In that case we cannot clear the * VFTA bit until all pools/VFs using that VLAN ID have also * been cleared. This will be indicated by "bits" being * zero. */ vfta_delta = 0; vlvf_update: /* record pool change and enable VLAN ID if not already enabled */ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); vfta_update: /* Update VFTA now that we are ready for traffic */ if (vfta_delta) IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); return 0; } /** * ixgbe_clear_vfta_generic - Clear VLAN filter table * @hw: pointer to hardware structure * * Clears the VLAN filter table, and the VMDq index associated with the filter **/ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) { u32 offset; for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0); } return 0; } /** * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix * @hw: pointer to hardware structure * * Contains the logic to identify if we need to verify link for the * crosstalk fix **/ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) { /* Does FW say we need the fix */ if (!hw->need_crosstalk_fix) return false; /* Only consider SFP+ PHYs i.e. 
media type fiber */ switch (hw->mac.ops.get_media_type(hw)) { case ixgbe_media_type_fiber: case ixgbe_media_type_fiber_qsfp: break; default: return false; } return true; } /** * ixgbe_check_mac_link_generic - Determine link and speed status * @hw: pointer to hardware structure * @speed: pointer to link speed * @link_up: true when link is up * @link_up_wait_to_complete: bool used to wait for link up or not * * Reads the links register to determine if link is up and the current speed **/ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete) { bool crosstalk_fix_active = ixgbe_need_crosstalk_fix(hw); u32 links_reg, links_orig; u32 i; /* If Crosstalk fix enabled do the sanity check of making sure * the SFP+ cage is full. */ if (crosstalk_fix_active) { u32 sfp_cage_full; switch (hw->mac.type) { case ixgbe_mac_82599EB: sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & IXGBE_ESDP_SDP2; break; case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & IXGBE_ESDP_SDP0; break; default: /* sanity check - No SFP+ devices here */ sfp_cage_full = false; break; } if (!sfp_cage_full) { *link_up = false; *speed = IXGBE_LINK_SPEED_UNKNOWN; return 0; } } /* clear the old state */ links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_orig != links_reg) { hw_dbg(hw, "LINKS changed from %08X to %08X\n", links_orig, links_reg); } if (link_up_wait_to_complete) { for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { if (links_reg & IXGBE_LINKS_UP) { *link_up = true; break; } else { *link_up = false; } msleep(100); links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); } } else { if (links_reg & IXGBE_LINKS_UP) { if (crosstalk_fix_active) { /* Check the link state again after a delay * to filter out spurious link up * notifications. */ mdelay(5); links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (!(links_reg & IXGBE_LINKS_UP)) { *link_up = false; *speed = IXGBE_LINK_SPEED_UNKNOWN; return 0; } } *link_up = true; } else { *link_up = false; } } switch (links_reg & IXGBE_LINKS_SPEED_82599) { case IXGBE_LINKS_SPEED_10G_82599: if ((hw->mac.type >= ixgbe_mac_X550) && (links_reg & IXGBE_LINKS_SPEED_NON_STD)) *speed = IXGBE_LINK_SPEED_2_5GB_FULL; else *speed = IXGBE_LINK_SPEED_10GB_FULL; break; case IXGBE_LINKS_SPEED_1G_82599: *speed = IXGBE_LINK_SPEED_1GB_FULL; break; case IXGBE_LINKS_SPEED_100_82599: if ((hw->mac.type >= ixgbe_mac_X550) && (links_reg & IXGBE_LINKS_SPEED_NON_STD)) *speed = IXGBE_LINK_SPEED_5GB_FULL; else *speed = IXGBE_LINK_SPEED_100_FULL; break; case IXGBE_LINKS_SPEED_10_X550EM_A: *speed = IXGBE_LINK_SPEED_UNKNOWN; if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { *speed = IXGBE_LINK_SPEED_10_FULL; } break; default: *speed = IXGBE_LINK_SPEED_UNKNOWN; } return 0; } /** * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from * the EEPROM * @hw: pointer to hardware structure * @wwnn_prefix: the alternative WWNN prefix * @wwpn_prefix: the alternative WWPN prefix * * This function will read the EEPROM from the alternative SAN MAC address * block to check the support for the alternative WWNN/WWPN prefix support. 
**/ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, u16 *wwpn_prefix) { u16 offset, caps; u16 alt_san_mac_blk_offset; /* clear output first */ *wwnn_prefix = 0xFFFF; *wwpn_prefix = 0xFFFF; /* check if alternative SAN MAC is supported */ offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR; if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset)) goto wwn_prefix_err; if ((alt_san_mac_blk_offset == 0) || (alt_san_mac_blk_offset == 0xFFFF)) return 0; /* check capability in alternative san mac address block */ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; if (hw->eeprom.ops.read(hw, offset, &caps)) goto wwn_prefix_err; if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) return 0; /* get the corresponding prefix for WWNN/WWPN */ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) hw_err(hw, "eeprom read at offset %d failed\n", offset); offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) goto wwn_prefix_err; return 0; wwn_prefix_err: hw_err(hw, "eeprom read at offset %d failed\n", offset); return 0; } /** * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing * @hw: pointer to hardware structure * @enable: enable or disable switch for MAC anti-spoofing * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing * **/ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) { int vf_target_reg = vf >> 3; int vf_target_shift = vf % 8; u32 pfvfspoof; if (hw->mac.type == ixgbe_mac_82598EB) return; pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) pfvfspoof |= BIT(vf_target_shift); else pfvfspoof &= ~BIT(vf_target_shift); IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } /** * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing * @hw: pointer to hardware structure * @enable: enable or disable switch for VLAN anti-spoofing * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing * **/ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) { int vf_target_reg = vf >> 3; int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; u32 pfvfspoof; if (hw->mac.type == ixgbe_mac_82598EB) return; pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) pfvfspoof |= BIT(vf_target_shift); else pfvfspoof &= ~BIT(vf_target_shift); IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } /** * ixgbe_get_device_caps_generic - Get additional device capabilities * @hw: pointer to hardware structure * @device_caps: the EEPROM word with the extra device capabilities * * This function will read the EEPROM location for the device capabilities, * and return the word through device_caps. 
**/ s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) { hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); return 0; } /** * ixgbe_set_rxpba_generic - Initialize RX packet buffer * @hw: pointer to hardware structure * @num_pb: number of packet buffers to allocate * @headroom: reserve n KB of headroom * @strategy: packet buffer allocation strategy **/ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, int strategy) { u32 pbsize = hw->mac.rx_pb_size; int i = 0; u32 rxpktsize, txpktsize, txpbthresh; /* Reserve headroom */ pbsize -= headroom; if (!num_pb) num_pb = 1; /* Divide remaining packet buffer space amongst the number * of packet buffers requested using supplied strategy. */ switch (strategy) { case (PBA_STRATEGY_WEIGHTED): /* pba_80_48 strategy weight first half of packet buffer with * 5/8 of the packet buffer space. */ rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8)); pbsize -= rxpktsize * (num_pb / 2); rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; for (; i < (num_pb / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); fallthrough; /* configure remaining packet buffers */ case (PBA_STRATEGY_EQUAL): /* Divide the remaining Rx packet buffer evenly among the TCs */ rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; for (; i < num_pb; i++) IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); break; default: break; } /* * Setup Tx packet buffer and threshold equally for all TCs * TXPBTHRESH register is set in K so divide by 1024 and subtract * 10 since the largest packet we support is just over 9K. */ txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; for (i = 0; i < num_pb; i++) { IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); } /* Clear unused TCs, if any, to zero buffer size*/ for (; i < IXGBE_MAX_PB; i++) { IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); } } /** * ixgbe_calculate_checksum - Calculate checksum for buffer * @buffer: pointer to EEPROM * @length: size of EEPROM to calculate a checksum for * * Calculates the checksum for some buffer on a specified length. The * checksum calculated is returned. **/ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) { u32 i; u8 sum = 0; if (!buffer) return 0; for (i = 0; i < length; i++) sum += buffer[i]; return (u8) (0 - sum); } /** * ixgbe_hic_unlocked - Issue command to manageability block unlocked * @hw: pointer to the HW structure * @buffer: command to write and where the return status will be placed * @length: length of buffer, must be multiple of 4 bytes * @timeout: time in ms to wait for command completion * * Communicates with the manageability block. On success return 0 * else returns semaphore error when encountering an error acquiring * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. * * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held * by the caller. **/ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, u32 timeout) { u32 hicr, i, fwsts; u16 dword_len; if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } /* Set bit 9 of FWSTS clearing FW reset indication */ fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); /* Check that the host interface is enabled. 
*/ hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if (!(hicr & IXGBE_HICR_EN)) { hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } /* Calculate length in DWORDs. We must be DWORD aligned */ if (length % sizeof(u32)) { hw_dbg(hw, "Buffer length failure, not aligned to dword"); return IXGBE_ERR_INVALID_ARGUMENT; } dword_len = length >> 2; /* The device driver writes the relevant command block * into the ram area. */ for (i = 0; i < dword_len; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, i, (__force u32)cpu_to_le32(buffer[i])); /* Setting this bit tells the ARC that a new command is pending. */ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); for (i = 0; i < timeout; i++) { hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if (!(hicr & IXGBE_HICR_C)) break; usleep_range(1000, 2000); } /* Check command successful completion. */ if ((timeout && i == timeout) || !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) return IXGBE_ERR_HOST_INTERFACE_COMMAND; return 0; } /** * ixgbe_host_interface_command - Issue command to manageability block * @hw: pointer to the HW structure * @buffer: contains the command to write and where the return status will * be placed * @length: length of buffer, must be multiple of 4 bytes * @timeout: time in ms to wait for command completion * @return_data: read and return data from the buffer (true) or not (false) * Needed because FW structures are big endian and decoding of * these fields can be 8 bit or 16 bit based on command. Decoding * is not easily understood without making a table of commands. * So we will leave this up to the caller to read back the data * in these cases. * * Communicates with the manageability block. On success return 0 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. **/ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, u32 length, u32 timeout, bool return_data) { u32 hdr_size = sizeof(struct ixgbe_hic_hdr); struct ixgbe_hic_hdr *hdr = buffer; u32 *u32arr = buffer; u16 buf_len, dword_len; s32 status; u32 bi; if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } /* Take management host interface semaphore */ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); if (status) return status; status = ixgbe_hic_unlocked(hw, buffer, length, timeout); if (status) goto rel_out; if (!return_data) goto rel_out; /* Calculate length in DWORDs */ dword_len = hdr_size >> 2; /* first pull in the header so we know the buffer length */ for (bi = 0; bi < dword_len; bi++) { u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); le32_to_cpus(&u32arr[bi]); } /* If there is any thing in data position pull it in */ buf_len = hdr->buf_len; if (!buf_len) goto rel_out; if (length < round_up(buf_len, 4) + hdr_size) { hw_dbg(hw, "Buffer not large enough for reply message.\n"); status = IXGBE_ERR_HOST_INTERFACE_COMMAND; goto rel_out; } /* Calculate length in DWORDs, add 3 for odd lengths */ dword_len = (buf_len + 3) >> 2; /* Pull in the rest of the buffer (bi is where we left off) */ for (; bi <= dword_len; bi++) { u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); le32_to_cpus(&u32arr[bi]); } rel_out: hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); return status; } /** * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware * @hw: pointer to the HW structure * @maj: driver version major number * @min: driver version minor number * @build: driver version build number * @sub: driver 
version sub build number * @len: length of driver_ver string * @driver_ver: driver string * * Sends driver version number to firmware through the manageability * block. On success return 0 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. **/ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 sub, __always_unused u16 len, __always_unused const char *driver_ver) { struct ixgbe_hic_drv_info fw_cmd; int i; s32 ret_val; fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; fw_cmd.port_num = hw->bus.func; fw_cmd.ver_maj = maj; fw_cmd.ver_min = min; fw_cmd.ver_build = build; fw_cmd.ver_sub = sub; fw_cmd.hdr.checksum = 0; fw_cmd.pad = 0; fw_cmd.pad2 = 0; fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { ret_val = ixgbe_host_interface_command(hw, &fw_cmd, sizeof(fw_cmd), IXGBE_HI_COMMAND_TIMEOUT, true); if (ret_val != 0) continue; if (fw_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) ret_val = 0; else ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; break; } return ret_val; } /** * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo * @hw: pointer to the hardware structure * * The 82599 and x540 MACs can experience issues if TX work is still pending * when a reset occurs. This function prevents this by flushing the PCIe * buffers on the system. **/ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) { u32 gcr_ext, hlreg0, i, poll; u16 value; /* * If double reset is not requested then all transactions should * already be clear and as such there is no work to do */ if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED)) return; /* * Set loopback enable to prevent any transmits from being sent * should the link come up. This assumes that the RXCTRL.RXEN bit * has already been cleared. */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); /* wait for a last completion before clearing buffers */ IXGBE_WRITE_FLUSH(hw); usleep_range(3000, 6000); /* Before proceeding, make sure that the PCIe block does not have * transactions pending. */ poll = ixgbe_pcie_timeout_poll(hw); for (i = 0; i < poll; i++) { usleep_range(100, 200); value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS); if (ixgbe_removed(hw->hw_addr)) break; if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) break; } /* initiate cleaning flow for buffers in the PCIe transaction layer */ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR); /* Flush all writes and allow 20usec for all transactions to clear */ IXGBE_WRITE_FLUSH(hw); udelay(20); /* restore previous register values */ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); } static const u8 ixgbe_emc_temp_data[4] = { IXGBE_EMC_INTERNAL_DATA, IXGBE_EMC_DIODE1_DATA, IXGBE_EMC_DIODE2_DATA, IXGBE_EMC_DIODE3_DATA }; static const u8 ixgbe_emc_therm_limit[4] = { IXGBE_EMC_INTERNAL_THERM_LIMIT, IXGBE_EMC_DIODE1_THERM_LIMIT, IXGBE_EMC_DIODE2_THERM_LIMIT, IXGBE_EMC_DIODE3_THERM_LIMIT }; /** * ixgbe_get_ets_data - Extracts the ETS bit data * @hw: pointer to hardware structure * @ets_cfg: extected ETS data * @ets_offset: offset of ETS data * * Returns error code. 
**/ static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg, u16 *ets_offset) { s32 status; status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset); if (status) return status; if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) return IXGBE_NOT_IMPLEMENTED; status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg); if (status) return status; if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) return IXGBE_NOT_IMPLEMENTED; return 0; } /** * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data * @hw: pointer to hardware structure * * Returns the thermal sensor data structure **/ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) { s32 status; u16 ets_offset; u16 ets_cfg; u16 ets_sensor; u8 num_sensors; u8 i; struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; /* Only support thermal sensors attached to physical port 0 */ if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) return IXGBE_NOT_IMPLEMENTED; status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); if (status) return status; num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); if (num_sensors > IXGBE_MAX_SENSORS) num_sensors = IXGBE_MAX_SENSORS; for (i = 0; i < num_sensors; i++) { u8 sensor_index; u8 sensor_location; status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor); if (status) return status; sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> IXGBE_ETS_DATA_INDEX_SHIFT); sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> IXGBE_ETS_DATA_LOC_SHIFT); if (sensor_location != 0) { status = hw->phy.ops.read_i2c_byte(hw, ixgbe_emc_temp_data[sensor_index], IXGBE_I2C_THERMAL_SENSOR_ADDR, &data->sensor[i].temp); if (status) return status; } } return 0; } /** * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds * @hw: pointer to hardware structure * * Inits the thermal sensor thresholds according to the NVM map * and save off the threshold and location values into mac.thermal_sensor_data **/ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) { s32 status; u16 ets_offset; u16 ets_cfg; u16 ets_sensor; u8 low_thresh_delta; u8 num_sensors; u8 therm_limit; u8 i; struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data)); /* Only support thermal sensors attached to physical port 0 */ if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) return IXGBE_NOT_IMPLEMENTED; status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); if (status) return status; low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >> IXGBE_ETS_LTHRES_DELTA_SHIFT); num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); if (num_sensors > IXGBE_MAX_SENSORS) num_sensors = IXGBE_MAX_SENSORS; for (i = 0; i < num_sensors; i++) { u8 sensor_index; u8 sensor_location; if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) { hw_err(hw, "eeprom read at offset %d failed\n", ets_offset + 1 + i); continue; } sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> IXGBE_ETS_DATA_INDEX_SHIFT); sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> IXGBE_ETS_DATA_LOC_SHIFT); therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK; hw->phy.ops.write_i2c_byte(hw, ixgbe_emc_therm_limit[sensor_index], IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit); if (sensor_location == 0) continue; data->sensor[i].location = sensor_location; data->sensor[i].caution_thresh = therm_limit; data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta; } return 
0; } /** * ixgbe_get_orom_version - Return option ROM from EEPROM * * @hw: pointer to hardware structure * @nvm_ver: pointer to output structure * * if valid option ROM version, nvm_ver->or_valid set to true * else nvm_ver->or_valid is false. **/ void ixgbe_get_orom_version(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver) { u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl; nvm_ver->or_valid = false; /* Option Rom may or may not be present. Start with pointer */ hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset); /* make sure offset is valid */ if (offset == 0x0 || offset == NVM_INVALID_PTR) return; hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh); hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl); /* option rom exists and is valid */ if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 || eeprom_cfg_blkl == NVM_VER_INVALID || eeprom_cfg_blkh == NVM_VER_INVALID) return; nvm_ver->or_valid = true; nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT; nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) | (eeprom_cfg_blkh >> NVM_OROM_SHIFT); nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK; } /** * ixgbe_get_oem_prod_version - Etrack ID from EEPROM * @hw: pointer to hardware structure * @nvm_ver: pointer to output structure * * if valid OEM product version, nvm_ver->oem_valid set to true * else nvm_ver->oem_valid is false. **/ void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver) { u16 rel_num, prod_ver, mod_len, cap, offset; nvm_ver->oem_valid = false; hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset); /* Return is offset to OEM Product Version block is invalid */ if (offset == 0x0 || offset == NVM_INVALID_PTR) return; /* Read product version block */ hw->eeprom.ops.read(hw, offset, &mod_len); hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap); /* Return if OEM product version block is invalid */ if (mod_len != NVM_OEM_PROD_VER_MOD_LEN || (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0) return; hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver); hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num); /* Return if version is invalid */ if ((rel_num | prod_ver) == 0x0 || rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID) return; nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT; nvm_ver->oem_minor = prod_ver & NVM_VER_MASK; nvm_ver->oem_release = rel_num; nvm_ver->oem_valid = true; } /** * ixgbe_get_etk_id - Return Etrack ID from EEPROM * * @hw: pointer to hardware structure * @nvm_ver: pointer to output structure * * word read errors will return 0xFFFF **/ void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver) { u16 etk_id_l, etk_id_h; if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l)) etk_id_l = NVM_VER_INVALID; if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h)) etk_id_h = NVM_VER_INVALID; /* The word order for the version format is determined by high order * word bit 15. 
*/ if ((etk_id_h & NVM_ETK_VALID) == 0) { nvm_ver->etk_id = etk_id_h; nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT); } else { nvm_ver->etk_id = etk_id_l; nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT); } } void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) { u32 rxctrl; rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); if (rxctrl & IXGBE_RXCTRL_RXEN) { if (hw->mac.type != ixgbe_mac_82598EB) { u32 pfdtxgswc; pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); hw->mac.set_lben = true; } else { hw->mac.set_lben = false; } } rxctrl &= ~IXGBE_RXCTRL_RXEN; IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); } } void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) { u32 rxctrl; rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN)); if (hw->mac.type != ixgbe_mac_82598EB) { if (hw->mac.set_lben) { u32 pfdtxgswc; pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); hw->mac.set_lben = false; } } } /** ixgbe_mng_present - returns true when management capability is present * @hw: pointer to hardware structure **/ bool ixgbe_mng_present(struct ixgbe_hw *hw) { u32 fwsm; if (hw->mac.type < ixgbe_mac_82599EB) return false; fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); return !!(fwsm & IXGBE_FWSM_FW_MODE_PT); } /** * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the MAC and/or PHY register and restarts link. */ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; s32 status = 0; u32 speedcnt = 0; u32 i = 0; bool autoneg, link_up = false; /* Mask off requested but non-supported speeds */ status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg); if (status) return status; speed &= link_speed; /* Try each speed one by one, highest priority first. We do this in * software because 10Gb fiber doesn't support speed autonegotiation. */ if (speed & IXGBE_LINK_SPEED_10GB_FULL) { speedcnt++; highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; /* Set the module link speed */ switch (hw->phy.media_type) { case ixgbe_media_type_fiber: hw->mac.ops.set_rate_select_speed(hw, IXGBE_LINK_SPEED_10GB_FULL); break; case ixgbe_media_type_fiber_qsfp: /* QSFP module automatically detects MAC link speed */ break; default: hw_dbg(hw, "Unexpected media type\n"); break; } /* Allow module to change analog characteristics (1G->10G) */ msleep(40); status = hw->mac.ops.setup_mac_link(hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg_wait_to_complete); if (status) return status; /* Flap the Tx laser if it has not already been done */ if (hw->mac.ops.flap_tx_laser) hw->mac.ops.flap_tx_laser(hw); /* Wait for the controller to acquire link. Per IEEE 802.3ap, * Section 73.10.2, we may have to wait up to 500ms if KR is * attempted. 82599 uses the same timing for 10g SFI. 
*/ for (i = 0; i < 5; i++) { /* Wait for the link partner to also set speed */ msleep(100); /* If we have link, just jump out */ status = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (status) return status; if (link_up) goto out; } } if (speed & IXGBE_LINK_SPEED_1GB_FULL) { speedcnt++; if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; /* Set the module link speed */ switch (hw->phy.media_type) { case ixgbe_media_type_fiber: hw->mac.ops.set_rate_select_speed(hw, IXGBE_LINK_SPEED_1GB_FULL); break; case ixgbe_media_type_fiber_qsfp: /* QSFP module automatically detects link speed */ break; default: hw_dbg(hw, "Unexpected media type\n"); break; } /* Allow module to change analog characteristics (10G->1G) */ msleep(40); status = hw->mac.ops.setup_mac_link(hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg_wait_to_complete); if (status) return status; /* Flap the Tx laser if it has not already been done */ if (hw->mac.ops.flap_tx_laser) hw->mac.ops.flap_tx_laser(hw); /* Wait for the link partner to also set speed */ msleep(100); /* If we have link, just jump out */ status = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (status) return status; if (link_up) goto out; } /* We didn't get link. Configure back to the highest speed we tried, * (if there was more than one). We call ourselves back with just the * single highest speed that the user requested. */ if (speedcnt > 1) status = ixgbe_setup_mac_link_multispeed_fiber(hw, highest_link_speed, autoneg_wait_to_complete); out: /* Set autoneg_advertised value based on input link speed */ hw->phy.autoneg_advertised = 0; if (speed & IXGBE_LINK_SPEED_10GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; if (speed & IXGBE_LINK_SPEED_1GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; return status; } /** * ixgbe_set_soft_rate_select_speed - Set module link speed * @hw: pointer to hardware structure * @speed: link speed to set * * Set module link speed via the soft rate select. */ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) { s32 status; u8 rs, eeprom_data; switch (speed) { case IXGBE_LINK_SPEED_10GB_FULL: /* one bit mask same as setting on */ rs = IXGBE_SFF_SOFT_RS_SELECT_10G; break; case IXGBE_LINK_SPEED_1GB_FULL: rs = IXGBE_SFF_SOFT_RS_SELECT_1G; break; default: hw_dbg(hw, "Invalid fixed module speed\n"); return; } /* Set RS0 */ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, IXGBE_I2C_EEPROM_DEV_ADDR2, &eeprom_data); if (status) { hw_dbg(hw, "Failed to read Rx Rate Select RS0\n"); return; } eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, IXGBE_I2C_EEPROM_DEV_ADDR2, eeprom_data); if (status) { hw_dbg(hw, "Failed to write Rx Rate Select RS0\n"); return; } /* Set RS1 */ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, IXGBE_I2C_EEPROM_DEV_ADDR2, &eeprom_data); if (status) { hw_dbg(hw, "Failed to read Rx Rate Select RS1\n"); return; } eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, IXGBE_I2C_EEPROM_DEV_ADDR2, eeprom_data); if (status) { hw_dbg(hw, "Failed to write Rx Rate Select RS1\n"); return; } }
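/*
 * Illustrative sketch, not part of the upstream file: the expected usage
 * pattern for the SWFW semaphore helpers defined above. Any resource shared
 * with firmware is bracketed by acquire/release, exactly as
 * ixgbe_host_interface_command() does with IXGBE_GSSR_SW_MNG_SM earlier in
 * this file. The function name below is hypothetical and only shows the
 * pattern.
 */
static s32 ixgbe_example_locked_access(struct ixgbe_hw *hw)
{
	s32 status;

	/* Take the shared semaphore so firmware stays off the resource */
	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	if (status)
		return status;

	/* ... access the protected registers here ... */

	/* Always release on every path after a successful acquire */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	return 0;
}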
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "ixgbe.h" #include "ixgbe_common.h" #include "ixgbe_type.h" #include <linux/module.h> #include <linux/types.h> #include <linux/sysfs.h> #include <linux/kobject.h> #include <linux/device.h> #include <linux/netdevice.h> #include <linux/hwmon.h> /* hwmon callback functions */ static ssize_t ixgbe_hwmon_show_location(struct device *dev, struct device_attribute *attr, char *buf) { struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, dev_attr); return sprintf(buf, "loc%u\n", ixgbe_attr->sensor->location); } static ssize_t ixgbe_hwmon_show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, dev_attr); unsigned int value; /* reset the temp field */ ixgbe_attr->hw->mac.ops.get_thermal_sensor_data(ixgbe_attr->hw); value = ixgbe_attr->sensor->temp; /* display millidegree */ value *= 1000; return sprintf(buf, "%u\n", value); } static ssize_t ixgbe_hwmon_show_cautionthresh(struct device *dev, struct device_attribute *attr, char *buf) { struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, dev_attr); unsigned int value = ixgbe_attr->sensor->caution_thresh; /* display millidegree */ value *= 1000; return sprintf(buf, "%u\n", value); } static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev, struct device_attribute *attr, char *buf) { struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, dev_attr); unsigned int value = ixgbe_attr->sensor->max_op_thresh; /* display millidegree */ value *= 1000; return sprintf(buf, "%u\n", value); } /** * ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. * @adapter: pointer to the adapter structure * @offset: offset in the eeprom sensor data table * @type: type of sensor data to display * * For each file we want in hwmon's sysfs interface we need a device_attribute * This is included in our hwmon_attr struct that contains the references to * the data structures we need to get the data to display. 
*/ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter, unsigned int offset, int type) { int rc; unsigned int n_attr; struct hwmon_attr *ixgbe_attr; n_attr = adapter->ixgbe_hwmon_buff->n_hwmon; ixgbe_attr = &adapter->ixgbe_hwmon_buff->hwmon_list[n_attr]; switch (type) { case IXGBE_HWMON_TYPE_LOC: ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location; snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), "temp%u_label", offset + 1); break; case IXGBE_HWMON_TYPE_TEMP: ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp; snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), "temp%u_input", offset + 1); break; case IXGBE_HWMON_TYPE_CAUTION: ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh; snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), "temp%u_max", offset + 1); break; case IXGBE_HWMON_TYPE_MAX: ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh; snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), "temp%u_crit", offset + 1); break; default: rc = -EPERM; return rc; } /* These always the same regardless of type */ ixgbe_attr->sensor = &adapter->hw.mac.thermal_sensor_data.sensor[offset]; ixgbe_attr->hw = &adapter->hw; ixgbe_attr->dev_attr.store = NULL; ixgbe_attr->dev_attr.attr.mode = 0444; ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name; sysfs_attr_init(&ixgbe_attr->dev_attr.attr); adapter->ixgbe_hwmon_buff->attrs[n_attr] = &ixgbe_attr->dev_attr.attr; ++adapter->ixgbe_hwmon_buff->n_hwmon; return 0; } static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter) { } /* called from ixgbe_main.c */ void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter) { ixgbe_sysfs_del_adapter(adapter); } /* called from ixgbe_main.c */ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) { struct hwmon_buff *ixgbe_hwmon; struct device *hwmon_dev; unsigned int i; int rc = 0; /* If this method isn't defined we don't support thermals */ if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) { goto exit; } /* Don't create thermal hwmon interface if no sensors present */ if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)) goto exit; ixgbe_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*ixgbe_hwmon), GFP_KERNEL); if (ixgbe_hwmon == NULL) { rc = -ENOMEM; goto exit; } adapter->ixgbe_hwmon_buff = ixgbe_hwmon; for (i = 0; i < IXGBE_MAX_SENSORS; i++) { /* * Only create hwmon sysfs entries for sensors that have * meaningful data for. */ if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0) continue; /* Bail if any hwmon attr struct fails to initialize */ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION); if (rc) goto exit; rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC); if (rc) goto exit; rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP); if (rc) goto exit; rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX); if (rc) goto exit; } ixgbe_hwmon->groups[0] = &ixgbe_hwmon->group; ixgbe_hwmon->group.attrs = ixgbe_hwmon->attrs; hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev, "ixgbe", ixgbe_hwmon, ixgbe_hwmon->groups); if (IS_ERR(hwmon_dev)) rc = PTR_ERR(hwmon_dev); exit: return rc; }
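/*
 * Illustrative user-space sketch, not part of the driver: the attributes
 * created above follow the standard hwmon naming, and the show callbacks
 * multiply by 1000, so temp%u_input, temp%u_max and temp%u_crit report
 * millidegrees Celsius. The "hwmon0" index below is hypothetical; the
 * actual index depends on the system.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
	long millideg;

	if (!f)
		return 1;
	if (fscanf(f, "%ld", &millideg) == 1)
		printf("sensor 1: %ld.%03ld C\n",
		       millideg / 1000, millideg % 1000);
	fclose(f);
	return 0;
}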
linux-master
drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2009 - 2018 Intel Corporation. */ /* ethtool support for igbvf */ #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include "igbvf.h" #include <linux/if_vlan.h> struct igbvf_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int stat_offset; int base_stat_offset; }; #define IGBVF_STAT(current, base) \ sizeof(((struct igbvf_adapter *)0)->current), \ offsetof(struct igbvf_adapter, current), \ offsetof(struct igbvf_adapter, base) static const struct igbvf_stats igbvf_gstrings_stats[] = { { "rx_packets", IGBVF_STAT(stats.gprc, stats.base_gprc) }, { "tx_packets", IGBVF_STAT(stats.gptc, stats.base_gptc) }, { "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) }, { "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) }, { "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) }, { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) }, { "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) }, { "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) }, { "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) }, { "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) }, { "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) }, { "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) }, { "alloc_rx_buff_failed", IGBVF_STAT(alloc_rx_buff_failed, zero_base) }, }; #define IGBVF_GLOBAL_STATS_LEN ARRAY_SIZE(igbvf_gstrings_stats) static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = { "Link test (on/offline)" }; #define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) static int igbvf_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 status; ethtool_link_ksettings_zero_link_mode(cmd, supported); ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); ethtool_link_ksettings_zero_link_mode(cmd, advertising); ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); cmd->base.port = -1; status = er32(STATUS); if (status & E1000_STATUS_LU) { if (status & E1000_STATUS_SPEED_1000) cmd->base.speed = SPEED_1000; else if (status & E1000_STATUS_SPEED_100) cmd->base.speed = SPEED_100; else cmd->base.speed = SPEED_10; if (status & E1000_STATUS_FD) cmd->base.duplex = DUPLEX_FULL; else cmd->base.duplex = DUPLEX_HALF; } else { cmd->base.speed = SPEED_UNKNOWN; cmd->base.duplex = DUPLEX_UNKNOWN; } cmd->base.autoneg = AUTONEG_DISABLE; return 0; } static int igbvf_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { return -EOPNOTSUPP; } static void igbvf_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { } static int igbvf_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { return -EOPNOTSUPP; } static u32 igbvf_get_msglevel(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); return adapter->msg_enable; } static void igbvf_set_msglevel(struct net_device *netdev, u32 data) { struct igbvf_adapter *adapter = netdev_priv(netdev); adapter->msg_enable = data; } static int igbvf_get_regs_len(struct net_device *netdev) { #define IGBVF_REGS_LEN 8 return IGBVF_REGS_LEN * sizeof(u32); } static void igbvf_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 *regs_buff = p; 
memset(p, 0, IGBVF_REGS_LEN * sizeof(u32)); regs->version = (1u << 24) | (adapter->pdev->revision << 16) | adapter->pdev->device; regs_buff[0] = er32(CTRL); regs_buff[1] = er32(STATUS); regs_buff[2] = er32(RDLEN(0)); regs_buff[3] = er32(RDH(0)); regs_buff[4] = er32(RDT(0)); regs_buff[5] = er32(TDLEN(0)); regs_buff[6] = er32(TDH(0)); regs_buff[7] = er32(TDT(0)); } static int igbvf_get_eeprom_len(struct net_device *netdev) { return 0; } static int igbvf_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { return -EOPNOTSUPP; } static int igbvf_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { return -EOPNOTSUPP; } static void igbvf_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct igbvf_adapter *adapter = netdev_priv(netdev); strscpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver)); strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); } static void igbvf_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_ring *tx_ring = adapter->tx_ring; struct igbvf_ring *rx_ring = adapter->rx_ring; ring->rx_max_pending = IGBVF_MAX_RXD; ring->tx_max_pending = IGBVF_MAX_TXD; ring->rx_pending = rx_ring->count; ring->tx_pending = tx_ring->count; } static int igbvf_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_ring *temp_ring; int err = 0; u32 new_rx_count, new_tx_count; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; new_rx_count = max_t(u32, ring->rx_pending, IGBVF_MIN_RXD); new_rx_count = min_t(u32, new_rx_count, IGBVF_MAX_RXD); new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); new_tx_count = max_t(u32, ring->tx_pending, IGBVF_MIN_TXD); new_tx_count = min_t(u32, new_tx_count, IGBVF_MAX_TXD); new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); if ((new_tx_count == adapter->tx_ring->count) && (new_rx_count == adapter->rx_ring->count)) { /* nothing to do */ return 0; } while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) usleep_range(1000, 2000); if (!netif_running(adapter->netdev)) { adapter->tx_ring->count = new_tx_count; adapter->rx_ring->count = new_rx_count; goto clear_reset; } temp_ring = vmalloc(sizeof(struct igbvf_ring)); if (!temp_ring) { err = -ENOMEM; goto clear_reset; } igbvf_down(adapter); /* We can't just free everything and then setup again, * because the ISRs in MSI-X mode get passed pointers * to the Tx and Rx ring structs. 
*/ if (new_tx_count != adapter->tx_ring->count) { memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring)); temp_ring->count = new_tx_count; err = igbvf_setup_tx_resources(adapter, temp_ring); if (err) goto err_setup; igbvf_free_tx_resources(adapter->tx_ring); memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring)); } if (new_rx_count != adapter->rx_ring->count) { memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring)); temp_ring->count = new_rx_count; err = igbvf_setup_rx_resources(adapter, temp_ring); if (err) goto err_setup; igbvf_free_rx_resources(adapter->rx_ring); memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring)); } err_setup: igbvf_up(adapter); vfree(temp_ring); clear_reset: clear_bit(__IGBVF_RESETTING, &adapter->state); return err; } static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data) { struct e1000_hw *hw = &adapter->hw; *data = 0; spin_lock_bh(&hw->mbx_lock); hw->mac.ops.check_for_link(hw); spin_unlock_bh(&hw->mbx_lock); if (!(er32(STATUS) & E1000_STATUS_LU)) *data = 1; return *data; } static void igbvf_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { struct igbvf_adapter *adapter = netdev_priv(netdev); set_bit(__IGBVF_TESTING, &adapter->state); /* Link test performed before hardware reset so autoneg doesn't * interfere with test result */ if (igbvf_link_test(adapter, &data[0])) eth_test->flags |= ETH_TEST_FL_FAILED; clear_bit(__IGBVF_TESTING, &adapter->state); msleep_interruptible(4 * 1000); } static void igbvf_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { wol->supported = 0; wol->wolopts = 0; } static int igbvf_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { return -EOPNOTSUPP; } static int igbvf_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct igbvf_adapter *adapter = netdev_priv(netdev); if (adapter->requested_itr <= 3) ec->rx_coalesce_usecs = adapter->requested_itr; else ec->rx_coalesce_usecs = adapter->current_itr >> 2; return 0; } static int igbvf_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) && (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) { adapter->current_itr = ec->rx_coalesce_usecs << 2; adapter->requested_itr = 1000000000 / (adapter->current_itr * 256); } else if ((ec->rx_coalesce_usecs == 3) || (ec->rx_coalesce_usecs == 2)) { adapter->current_itr = IGBVF_START_ITR; adapter->requested_itr = ec->rx_coalesce_usecs; } else if (ec->rx_coalesce_usecs == 0) { /* The user's desire is to turn off interrupt throttling * altogether, but due to HW limitations, we can't do that. * Instead we set a very small value in EITR, which would * allow ~967k interrupts per second, but allow the adapter's * internal clocking to still function properly. 
*/ adapter->current_itr = 4; adapter->requested_itr = 1000000000 / (adapter->current_itr * 256); } else { return -EINVAL; } writel(adapter->current_itr, hw->hw_addr + adapter->rx_ring->itr_register); return 0; } static int igbvf_nway_reset(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) igbvf_reinit_locked(adapter); return 0; } static void igbvf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct igbvf_adapter *adapter = netdev_priv(netdev); int i; igbvf_update_stats(adapter); for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { char *p = (char *)adapter + igbvf_gstrings_stats[i].stat_offset; char *b = (char *)adapter + igbvf_gstrings_stats[i].base_stat_offset; data[i] = ((igbvf_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : (*(u32 *)p - *(u32 *)b)); } } static int igbvf_get_sset_count(struct net_device *dev, int stringset) { switch (stringset) { case ETH_SS_TEST: return IGBVF_TEST_LEN; case ETH_SS_STATS: return IGBVF_GLOBAL_STATS_LEN; default: return -EINVAL; } } static void igbvf_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { u8 *p = data; int i; switch (stringset) { case ETH_SS_TEST: memcpy(data, *igbvf_gstrings_test, sizeof(igbvf_gstrings_test)); break; case ETH_SS_STATS: for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { memcpy(p, igbvf_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } break; } } static const struct ethtool_ops igbvf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, .get_drvinfo = igbvf_get_drvinfo, .get_regs_len = igbvf_get_regs_len, .get_regs = igbvf_get_regs, .get_wol = igbvf_get_wol, .set_wol = igbvf_set_wol, .get_msglevel = igbvf_get_msglevel, .set_msglevel = igbvf_set_msglevel, .nway_reset = igbvf_nway_reset, .get_link = ethtool_op_get_link, .get_eeprom_len = igbvf_get_eeprom_len, .get_eeprom = igbvf_get_eeprom, .set_eeprom = igbvf_set_eeprom, .get_ringparam = igbvf_get_ringparam, .set_ringparam = igbvf_set_ringparam, .get_pauseparam = igbvf_get_pauseparam, .set_pauseparam = igbvf_set_pauseparam, .self_test = igbvf_diag_test, .get_sset_count = igbvf_get_sset_count, .get_strings = igbvf_get_strings, .get_ethtool_stats = igbvf_get_ethtool_stats, .get_coalesce = igbvf_get_coalesce, .set_coalesce = igbvf_set_coalesce, .get_link_ksettings = igbvf_get_link_ksettings, .set_link_ksettings = igbvf_set_link_ksettings, }; void igbvf_set_ethtool_ops(struct net_device *netdev) { netdev->ethtool_ops = &igbvf_ethtool_ops; }
linux-master
drivers/net/ethernet/intel/igbvf/ethtool.c
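The igbvf_set_ringparam() path in the file above clamps the user-requested descriptor count to the driver's bounds and rounds it to the descriptor multiple before allocating the temporary ring. Below is a minimal userspace sketch of just that clamp-and-align step, not driver code; MIN_RXD, MAX_RXD and RXD_MULT are illustrative stand-ins for the driver's IGBVF_MIN_RXD, IGBVF_MAX_RXD and REQ_RX_DESCRIPTOR_MULTIPLE, whose exact values are assumptions here.

/* Hypothetical userspace sketch (not part of the driver): mirrors the
 * clamp-then-align policy igbvf_set_ringparam() applies to a requested
 * descriptor count before resizing a ring.
 */
#include <stdint.h>
#include <stdio.h>

#define MIN_RXD   80u    /* assumed stand-in for IGBVF_MIN_RXD */
#define MAX_RXD   4096u  /* assumed stand-in for IGBVF_MAX_RXD */
#define RXD_MULT  8u     /* assumed descriptor-count granularity */

/* Round x up to the next multiple of a (a must be a power of two). */
static uint32_t align_up(uint32_t x, uint32_t a)
{
        return (x + a - 1) & ~(a - 1);
}

static uint32_t clamp_ring_size(uint32_t requested)
{
        uint32_t n = requested;

        if (n < MIN_RXD)
                n = MIN_RXD;
        if (n > MAX_RXD)
                n = MAX_RXD;
        return align_up(n, RXD_MULT);
}

int main(void)
{
        printf("%u -> %u\n", 1001u, clamp_ring_size(1001u)); /* rounds up to 1008 */
        printf("%u -> %u\n", 5u,    clamp_ring_size(5u));    /* clamped up to 80 */
        printf("%u -> %u\n", 9999u, clamp_ring_size(9999u)); /* clamped down to 4096 */
        return 0;
}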
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2009 - 2018 Intel Corporation. */ #include <linux/etherdevice.h> #include "vf.h" static s32 e1000_check_for_link_vf(struct e1000_hw *hw); static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, u16 *duplex); static s32 e1000_init_hw_vf(struct e1000_hw *hw); static s32 e1000_reset_hw_vf(struct e1000_hw *hw); static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, u32, u32, u32); static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); static s32 e1000_read_mac_addr_vf(struct e1000_hw *); static s32 e1000_set_uc_addr_vf(struct e1000_hw *hw, u32 subcmd, u8 *addr); static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool); /** * e1000_init_mac_params_vf - Inits MAC params * @hw: pointer to the HW structure **/ static s32 e1000_init_mac_params_vf(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; /* VF's have no MTA Registers - PF feature only */ mac->mta_reg_count = 128; /* VF's have no access to RAR entries */ mac->rar_entry_count = 1; /* Function pointers */ /* reset */ mac->ops.reset_hw = e1000_reset_hw_vf; /* hw initialization */ mac->ops.init_hw = e1000_init_hw_vf; /* check for link */ mac->ops.check_for_link = e1000_check_for_link_vf; /* link info */ mac->ops.get_link_up_info = e1000_get_link_up_info_vf; /* multicast address update */ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf; /* set mac address */ mac->ops.rar_set = e1000_rar_set_vf; /* read mac address */ mac->ops.read_mac_addr = e1000_read_mac_addr_vf; /* set mac filter */ mac->ops.set_uc_addr = e1000_set_uc_addr_vf; /* set vlan filter table array */ mac->ops.set_vfta = e1000_set_vfta_vf; return E1000_SUCCESS; } /** * e1000_init_function_pointers_vf - Inits function pointers * @hw: pointer to the HW structure **/ void e1000_init_function_pointers_vf(struct e1000_hw *hw) { hw->mac.ops.init_params = e1000_init_mac_params_vf; hw->mbx.ops.init_params = e1000_init_mbx_params_vf; } /** * e1000_get_link_up_info_vf - Gets link info. * @hw: pointer to the HW structure * @speed: pointer to 16 bit value to store link speed. * @duplex: pointer to 16 bit value to store duplex. * * Since we cannot read the PHY and get accurate link info, we must rely upon * the status register's data which is often stale and inaccurate. **/ static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, u16 *duplex) { s32 status; status = er32(STATUS); if (status & E1000_STATUS_SPEED_1000) *speed = SPEED_1000; else if (status & E1000_STATUS_SPEED_100) *speed = SPEED_100; else *speed = SPEED_10; if (status & E1000_STATUS_FD) *duplex = FULL_DUPLEX; else *duplex = HALF_DUPLEX; return E1000_SUCCESS; } /** * e1000_reset_hw_vf - Resets the HW * @hw: pointer to the HW structure * * VF's provide a function level reset. This is done using bit 26 of ctrl_reg. * This is all the reset we can perform on a VF. 
**/ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) { struct e1000_mbx_info *mbx = &hw->mbx; u32 timeout = E1000_VF_INIT_TIMEOUT; u32 ret_val = -E1000_ERR_MAC_INIT; u32 msgbuf[3]; u8 *addr = (u8 *)(&msgbuf[1]); u32 ctrl; /* assert VF queue/interrupt reset */ ctrl = er32(CTRL); ew32(CTRL, ctrl | E1000_CTRL_RST); /* we cannot initialize while the RSTI / RSTD bits are asserted */ while (!mbx->ops.check_for_rst(hw) && timeout) { timeout--; udelay(5); } if (timeout) { /* mailbox timeout can now become active */ mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; /* notify PF of VF reset completion */ msgbuf[0] = E1000_VF_RESET; mbx->ops.write_posted(hw, msgbuf, 1); mdelay(10); /* set our "perm_addr" based on info provided by PF */ ret_val = mbx->ops.read_posted(hw, msgbuf, 3); if (!ret_val) { switch (msgbuf[0]) { case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK: memcpy(hw->mac.perm_addr, addr, ETH_ALEN); break; case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK: eth_zero_addr(hw->mac.perm_addr); break; default: ret_val = -E1000_ERR_MAC_INIT; } } } return ret_val; } /** * e1000_init_hw_vf - Inits the HW * @hw: pointer to the HW structure * * Not much to do here except clear the PF Reset indication if there is one. **/ static s32 e1000_init_hw_vf(struct e1000_hw *hw) { /* attempt to set and restore our mac address */ e1000_rar_set_vf(hw, hw->mac.addr, 0); return E1000_SUCCESS; } /** * e1000_hash_mc_addr_vf - Generate a multicast hash value * @hw: pointer to the HW structure * @mc_addr: pointer to a multicast address * * Generates a multicast address hash value which is used to determine * the multicast filter table array address and new table value. See * e1000_mta_set_generic() **/ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) { u32 hash_value, hash_mask; u8 bit_shift = 0; /* Register count multiplied by bits per register */ hash_mask = (hw->mac.mta_reg_count * 32) - 1; /* The bit_shift is the number of left-shifts * where 0xFF would still fall within the hash mask. */ while (hash_mask >> bit_shift != 0xFF) bit_shift++; hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | (((u16)mc_addr[5]) << bit_shift))); return hash_value; } /** * e1000_update_mc_addr_list_vf - Update Multicast addresses * @hw: pointer to the HW structure * @mc_addr_list: array of multicast addresses to program * @mc_addr_count: number of multicast addresses to program * @rar_used_count: the first RAR register free to program * @rar_count: total number of supported Receive Address Registers * * Updates the Receive Address Registers and Multicast Table Array. * The caller must have a packed mc_addr_list of multicast addresses. * The parameter rar_count will usually be hw->mac.rar_entry_count * unless there are workarounds that change this. **/ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *mc_addr_list, u32 mc_addr_count, u32 rar_used_count, u32 rar_count) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[E1000_VFMAILBOX_SIZE]; u16 *hash_list = (u16 *)&msgbuf[1]; u32 hash_value; u32 cnt, i; s32 ret_val; /* Each entry in the list uses 1 16 bit word. We have 30 * 16 bit words available in our HW msg buffer (minus 1 for the * msg type). That's 30 hash values if we pack 'em right. If * there are more than 30 MC addresses to add then punt the * extras for now and then add code to handle more than 30 later. * It would be unusual for a server to request that many multi-cast * addresses except for in large enterprise network environments. */ cnt = (mc_addr_count > 30) ? 
30 : mc_addr_count; msgbuf[0] = E1000_VF_SET_MULTICAST; msgbuf[0] |= cnt << E1000_VT_MSGINFO_SHIFT; for (i = 0; i < cnt; i++) { hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list); hash_list[i] = hash_value & 0x0FFFF; mc_addr_list += ETH_ALEN; } ret_val = mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE); if (!ret_val) mbx->ops.read_posted(hw, msgbuf, 1); } /** * e1000_set_vfta_vf - Set/Unset vlan filter table address * @hw: pointer to the HW structure * @vid: determines the vfta register and bit to set/unset * @set: if true then set bit, else clear bit **/ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[2]; s32 err; msgbuf[0] = E1000_VF_SET_VLAN; msgbuf[1] = vid; /* Setting the 8 bit field MSG INFO to true indicates "add" */ if (set) msgbuf[0] |= BIT(E1000_VT_MSGINFO_SHIFT); mbx->ops.write_posted(hw, msgbuf, 2); err = mbx->ops.read_posted(hw, msgbuf, 2); msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; /* if nacked the vlan was rejected */ if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))) err = -E1000_ERR_MAC_INIT; return err; } /** * e1000_rlpml_set_vf - Set the maximum receive packet length * @hw: pointer to the HW structure * @max_size: value to assign to max frame size **/ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[2]; s32 ret_val; msgbuf[0] = E1000_VF_SET_LPE; msgbuf[1] = max_size; ret_val = mbx->ops.write_posted(hw, msgbuf, 2); if (!ret_val) mbx->ops.read_posted(hw, msgbuf, 1); } /** * e1000_rar_set_vf - set device MAC address * @hw: pointer to the HW structure * @addr: pointer to the receive address * @index: receive address array register **/ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[3]; u8 *msg_addr = (u8 *)(&msgbuf[1]); s32 ret_val; memset(msgbuf, 0, 12); msgbuf[0] = E1000_VF_SET_MAC_ADDR; memcpy(msg_addr, addr, ETH_ALEN); ret_val = mbx->ops.write_posted(hw, msgbuf, 3); if (!ret_val) ret_val = mbx->ops.read_posted(hw, msgbuf, 3); msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; /* if nacked the address was rejected, use "perm_addr" */ if (!ret_val && (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) e1000_read_mac_addr_vf(hw); } /** * e1000_read_mac_addr_vf - Read device MAC address * @hw: pointer to the HW structure **/ static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw) { memcpy(hw->mac.addr, hw->mac.perm_addr, ETH_ALEN); return E1000_SUCCESS; } /** * e1000_set_uc_addr_vf - Set or clear unicast filters * @hw: pointer to the HW structure * @sub_cmd: add or clear filters * @addr: pointer to the filter MAC address **/ static s32 e1000_set_uc_addr_vf(struct e1000_hw *hw, u32 sub_cmd, u8 *addr) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[3], msgbuf_chk; u8 *msg_addr = (u8 *)(&msgbuf[1]); s32 ret_val; memset(msgbuf, 0, sizeof(msgbuf)); msgbuf[0] |= sub_cmd; msgbuf[0] |= E1000_VF_SET_MAC_ADDR; msgbuf_chk = msgbuf[0]; if (addr) memcpy(msg_addr, addr, ETH_ALEN); ret_val = mbx->ops.write_posted(hw, msgbuf, 3); if (!ret_val) ret_val = mbx->ops.read_posted(hw, msgbuf, 3); msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; if (!ret_val) { msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; if (msgbuf[0] == (msgbuf_chk | E1000_VT_MSGTYPE_NACK)) return -ENOSPC; } return ret_val; } /** * e1000_check_for_link_vf - Check for link for a virtual interface * @hw: pointer to the HW structure * * Checks to see if the underlying PF is still talking to the VF and * if it is then it 
reports the link state to the hardware, otherwise * it reports link down and returns an error. **/ static s32 e1000_check_for_link_vf(struct e1000_hw *hw) { struct e1000_mbx_info *mbx = &hw->mbx; struct e1000_mac_info *mac = &hw->mac; s32 ret_val = E1000_SUCCESS; u32 in_msg = 0; /* We only want to run this if there has been a rst asserted. * in this case that could mean a link change, device reset, * or a virtual function reset */ /* If we were hit with a reset or timeout drop the link */ if (!mbx->ops.check_for_rst(hw) || !mbx->timeout) mac->get_link_status = true; if (!mac->get_link_status) goto out; /* if link status is down no point in checking to see if PF is up */ if (!(er32(STATUS) & E1000_STATUS_LU)) goto out; /* if the read failed it could just be a mailbox collision, best wait * until we are called again and don't report an error */ if (mbx->ops.read(hw, &in_msg, 1)) goto out; /* if incoming message isn't clear to send we are waiting on response */ if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { /* msg is not CTS and is NACK we must have lost CTS status */ if (in_msg & E1000_VT_MSGTYPE_NACK) ret_val = -E1000_ERR_MAC_INIT; goto out; } /* the PF is talking, if we timed out in the past we reinit */ if (!mbx->timeout) { ret_val = -E1000_ERR_MAC_INIT; goto out; } /* if we passed all the tests above then the link is up and we no * longer need to check for link */ mac->get_link_status = false; out: return ret_val; }
linux-master
drivers/net/ethernet/intel/igbvf/vf.c
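In the file above, e1000_hash_mc_addr_vf() derives the multicast filter hash from the last two bytes of the MAC address, with the hash width set by the 128 MTA registers the VF code assumes (hash_mask = 128 * 32 - 1). The standalone sketch below reproduces that same computation outside the kernel; mc_hash() and the sample address are illustrative, not driver symbols.

/* Hypothetical userspace sketch (not driver code): reproduces the
 * bit-shift multicast hash computed by e1000_hash_mc_addr_vf(),
 * assuming mta_reg_count = 128 as set in e1000_init_mac_params_vf().
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t mc_hash(const uint8_t mc_addr[6])
{
        uint32_t hash_mask = (128 * 32) - 1;    /* 0xFFF for 128 registers */
        uint8_t bit_shift = 0;

        /* Find how far the mask must shift right to leave exactly 0xFF. */
        while ((hash_mask >> bit_shift) != 0xFF)
                bit_shift++;

        /* The hash is built from the last two bytes of the address. */
        return hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
                            ((uint16_t)mc_addr[5] << bit_shift));
}

int main(void)
{
        /* Sample IPv4 mDNS group address, used purely for illustration. */
        const uint8_t mdns[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };

        printf("hash = 0x%03x\n", mc_hash(mdns)); /* 0xfb0 for these bytes */
        return 0;
}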
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2009 - 2018 Intel Corporation. */ #include "mbx.h" /** * e1000_poll_for_msg - Wait for message notification * @hw: pointer to the HW structure * * returns SUCCESS if it successfully received a message notification **/ static s32 e1000_poll_for_msg(struct e1000_hw *hw) { struct e1000_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; if (!mbx->ops.check_for_msg) goto out; while (countdown && mbx->ops.check_for_msg(hw)) { countdown--; udelay(mbx->usec_delay); } /* if we failed, all future posted messages fail until reset */ if (!countdown) mbx->timeout = 0; out: return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; } /** * e1000_poll_for_ack - Wait for message acknowledgment * @hw: pointer to the HW structure * * returns SUCCESS if it successfully received a message acknowledgment **/ static s32 e1000_poll_for_ack(struct e1000_hw *hw) { struct e1000_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; if (!mbx->ops.check_for_ack) goto out; while (countdown && mbx->ops.check_for_ack(hw)) { countdown--; udelay(mbx->usec_delay); } /* if we failed, all future posted messages fail until reset */ if (!countdown) mbx->timeout = 0; out: return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; } /** * e1000_read_posted_mbx - Wait for message notification and receive message * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * * returns SUCCESS if it successfully received a message notification and * copied it into the receive buffer. **/ static s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; if (!mbx->ops.read) goto out; ret_val = e1000_poll_for_msg(hw); /* if ack received read message, otherwise we timed out */ if (!ret_val) ret_val = mbx->ops.read(hw, msg, size); out: return ret_val; } /** * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * * returns SUCCESS if it successfully copied message into the buffer and * received an ack to that message within delay * timeout period **/ static s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; /* exit if we either can't write or there isn't a defined timeout */ if (!mbx->ops.write || !mbx->timeout) goto out; /* send msg*/ ret_val = mbx->ops.write(hw, msg, size); /* if msg sent wait until we receive an ack */ if (!ret_val) ret_val = e1000_poll_for_ack(hw); out: return ret_val; } /** * e1000_read_v2p_mailbox - read v2p mailbox * @hw: pointer to the HW structure * * This function is used to read the v2p mailbox without losing the read to * clear status bits. **/ static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw) { u32 v2p_mailbox = er32(V2PMAILBOX(0)); v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox; hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS; return v2p_mailbox; } /** * e1000_check_for_bit_vf - Determine if a status bit was set * @hw: pointer to the HW structure * @mask: bitmask for bits to be tested and cleared * * This function is used to check for the read to clear bits within * the V2P mailbox. 
**/ static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask) { u32 v2p_mailbox = e1000_read_v2p_mailbox(hw); s32 ret_val = -E1000_ERR_MBX; if (v2p_mailbox & mask) ret_val = E1000_SUCCESS; hw->dev_spec.vf.v2p_mailbox &= ~mask; return ret_val; } /** * e1000_check_for_msg_vf - checks to see if the PF has sent mail * @hw: pointer to the HW structure * * returns SUCCESS if the PF has set the Status bit or else ERR_MBX **/ static s32 e1000_check_for_msg_vf(struct e1000_hw *hw) { s32 ret_val = -E1000_ERR_MBX; if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) { ret_val = E1000_SUCCESS; hw->mbx.stats.reqs++; } return ret_val; } /** * e1000_check_for_ack_vf - checks to see if the PF has ACK'd * @hw: pointer to the HW structure * * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX **/ static s32 e1000_check_for_ack_vf(struct e1000_hw *hw) { s32 ret_val = -E1000_ERR_MBX; if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) { ret_val = E1000_SUCCESS; hw->mbx.stats.acks++; } return ret_val; } /** * e1000_check_for_rst_vf - checks to see if the PF has reset * @hw: pointer to the HW structure * * returns true if the PF has set the reset done bit or else false **/ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) { s32 ret_val = -E1000_ERR_MBX; if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | E1000_V2PMAILBOX_RSTI))) { ret_val = E1000_SUCCESS; hw->mbx.stats.rsts++; } return ret_val; } /** * e1000_obtain_mbx_lock_vf - obtain mailbox lock * @hw: pointer to the HW structure * * return SUCCESS if we obtained the mailbox lock **/ static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) { s32 ret_val = -E1000_ERR_MBX; int count = 10; do { /* Take ownership of the buffer */ ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); /* reserve mailbox for VF use */ if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) { ret_val = 0; break; } udelay(1000); } while (count-- > 0); return ret_val; } /** * e1000_write_mbx_vf - Write a message to the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * * returns SUCCESS if it successfully copied message into the buffer **/ static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) { s32 err; u16 i; lockdep_assert_held(&hw->mbx_lock); /* lock the mailbox to prevent pf/vf race condition */ err = e1000_obtain_mbx_lock_vf(hw); if (err) goto out_no_write; /* flush any ack or msg as we are going to overwrite mailbox */ e1000_check_for_ack_vf(hw); e1000_check_for_msg_vf(hw); /* copy the caller specified message to the mailbox memory buffer */ for (i = 0; i < size; i++) array_ew32(VMBMEM(0), i, msg[i]); /* update stats */ hw->mbx.stats.msgs_tx++; /* Drop VFU and interrupt the PF to tell it a message has been sent */ ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_REQ); out_no_write: return err; } /** * e1000_read_mbx_vf - Reads a message from the inbox intended for VF * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * * returns SUCCESS if it successfully read message from buffer **/ static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) { s32 err; u16 i; lockdep_assert_held(&hw->mbx_lock); /* lock the mailbox to prevent pf/vf race condition */ err = e1000_obtain_mbx_lock_vf(hw); if (err) goto out_no_read; /* copy the message from the mailbox memory buffer */ for (i = 0; i < size; i++) msg[i] = array_er32(VMBMEM(0), i); /* Acknowledge receipt and release mailbox, then we're done */ ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_ACK); /* update stats */ 
hw->mbx.stats.msgs_rx++; out_no_read: return err; } /** * e1000_init_mbx_params_vf - set initial values for VF mailbox * @hw: pointer to the HW structure * * Initializes the hw->mbx struct to correct values for VF mailbox */ s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) { struct e1000_mbx_info *mbx = &hw->mbx; /* start mailbox as timed out and let the reset_hw call set the timeout * value to being communications */ mbx->timeout = 0; mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; mbx->size = E1000_VFMAILBOX_SIZE; mbx->ops.read = e1000_read_mbx_vf; mbx->ops.write = e1000_write_mbx_vf; mbx->ops.read_posted = e1000_read_posted_mbx; mbx->ops.write_posted = e1000_write_posted_mbx; mbx->ops.check_for_msg = e1000_check_for_msg_vf; mbx->ops.check_for_ack = e1000_check_for_ack_vf; mbx->ops.check_for_rst = e1000_check_for_rst_vf; mbx->stats.msgs_tx = 0; mbx->stats.msgs_rx = 0; mbx->stats.reqs = 0; mbx->stats.acks = 0; mbx->stats.rsts = 0; return E1000_SUCCESS; }
linux-master
drivers/net/ethernet/intel/igbvf/mbx.c
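e1000_poll_for_msg() and e1000_poll_for_ack() in the file above share one pattern: count down a fixed number of polls with a fixed delay between them, and on exhaustion zero mbx->timeout so later posted writes keep failing until the mailbox is re-initialized after a reset. The sketch below shows that pattern as a generic userspace helper; poll_until(), struct mbx_state and still_waiting() are illustrative names, not driver code.

/* Hypothetical userspace sketch (not driver code): the countdown/poll
 * pattern used by e1000_poll_for_msg()/e1000_poll_for_ack(). check()
 * returns nonzero while the awaited event has not happened yet; the
 * fields stand in for mbx->timeout and mbx->usec_delay.
 */
#include <stdio.h>
#include <unistd.h>     /* usleep() */

struct mbx_state {
        int timeout;                /* remaining poll iterations; 0 = give up */
        unsigned int usec_delay;    /* delay between polls, in microseconds */
};

/* Returns 0 on success, -1 on timeout. */
static int poll_until(struct mbx_state *mbx, int (*check)(void *), void *arg)
{
        int countdown = mbx->timeout;

        while (countdown && check(arg)) {
                countdown--;
                usleep(mbx->usec_delay);
        }

        /* As in the driver: once a poll times out, the timeout is zeroed so
         * future posted operations fail until re-initialization.
         */
        if (!countdown)
                mbx->timeout = 0;

        return countdown ? 0 : -1;
}

/* Toy condition: pretend the awaited flag clears after a few polls. */
static int still_waiting(void *arg)
{
        int *polls_left = arg;

        return (*polls_left)-- > 0;
}

int main(void)
{
        struct mbx_state mbx = { .timeout = 1000, .usec_delay = 10 };
        int polls_left = 5;

        printf("result %d, timeout now %d\n",
               poll_until(&mbx, still_waiting, &polls_left), mbx.timeout);
        return 0;
}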
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2009 - 2018 Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/tcp.h> #include <linux/ipv6.h> #include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/prefetch.h> #include <linux/sctp.h> #include "igbvf.h" char igbvf_driver_name[] = "igbvf"; static const char igbvf_driver_string[] = "Intel(R) Gigabit Virtual Function Network Driver"; static const char igbvf_copyright[] = "Copyright (c) 2009 - 2012 Intel Corporation."; #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); static int igbvf_poll(struct napi_struct *napi, int budget); static void igbvf_reset(struct igbvf_adapter *); static void igbvf_set_interrupt_capability(struct igbvf_adapter *); static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); static struct igbvf_info igbvf_vf_info = { .mac = e1000_vfadapt, .flags = 0, .pba = 10, .init_ops = e1000_init_function_pointers_vf, }; static struct igbvf_info igbvf_i350_vf_info = { .mac = e1000_vfadapt_i350, .flags = 0, .pba = 10, .init_ops = e1000_init_function_pointers_vf, }; static const struct igbvf_info *igbvf_info_tbl[] = { [board_vf] = &igbvf_vf_info, [board_i350_vf] = &igbvf_i350_vf_info, }; /** * igbvf_desc_unused - calculate if we have unused descriptors * @ring: address of receive ring structure **/ static int igbvf_desc_unused(struct igbvf_ring *ring) { if (ring->next_to_clean > ring->next_to_use) return ring->next_to_clean - ring->next_to_use - 1; return ring->count + ring->next_to_clean - ring->next_to_use - 1; } /** * igbvf_receive_skb - helper function to handle Rx indications * @adapter: board private structure * @netdev: pointer to netdev struct * @skb: skb to indicate to stack * @status: descriptor status field as written by hardware * @vlan: descriptor vlan field as written by hardware (no le/be conversion) * @skb: pointer to sk_buff to be indicated to stack **/ static void igbvf_receive_skb(struct igbvf_adapter *adapter, struct net_device *netdev, struct sk_buff *skb, u32 status, __le16 vlan) { u16 vid; if (status & E1000_RXD_STAT_VP) { if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) && (status & E1000_RXDEXT_STATERR_LB)) vid = be16_to_cpu((__force __be16)vlan) & E1000_RXD_SPC_VLAN_MASK; else vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; if (test_bit(vid, adapter->active_vlans)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } napi_gro_receive(&adapter->rx_ring->napi, skb); } static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, u32 status_err, struct sk_buff *skb) { skb_checksum_none_assert(skb); /* Ignore Checksum bit is set or checksum is disabled through ethtool */ if ((status_err & E1000_RXD_STAT_IXSM) || (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED)) return; /* TCP/UDP checksum error bit is set */ if (status_err & (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { /* let the stack verify checksum errors */ adapter->hw_csum_err++; return; } /* It must be a TCP or UDP packet with a valid checksum */ if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) skb->ip_summed = 
CHECKSUM_UNNECESSARY; adapter->hw_csum_good++; } /** * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split * @rx_ring: address of ring structure to repopulate * @cleaned_count: number of buffers to repopulate **/ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, int cleaned_count) { struct igbvf_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union e1000_adv_rx_desc *rx_desc; struct igbvf_buffer *buffer_info; struct sk_buff *skb; unsigned int i; int bufsz; i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; if (adapter->rx_ps_hdr_size) bufsz = adapter->rx_ps_hdr_size; else bufsz = adapter->rx_buffer_len; while (cleaned_count--) { rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { if (!buffer_info->page) { buffer_info->page = alloc_page(GFP_ATOMIC); if (!buffer_info->page) { adapter->alloc_rx_buff_failed++; goto no_buffers; } buffer_info->page_offset = 0; } else { buffer_info->page_offset ^= PAGE_SIZE / 2; } buffer_info->page_dma = dma_map_page(&pdev->dev, buffer_info->page, buffer_info->page_offset, PAGE_SIZE / 2, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->page_dma)) { __free_page(buffer_info->page); buffer_info->page = NULL; dev_err(&pdev->dev, "RX DMA map failed\n"); break; } } if (!buffer_info->skb) { skb = netdev_alloc_skb_ip_align(netdev, bufsz); if (!skb) { adapter->alloc_rx_buff_failed++; goto no_buffers; } buffer_info->skb = skb; buffer_info->dma = dma_map_single(&pdev->dev, skb->data, bufsz, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; dev_err(&pdev->dev, "RX DMA map failed\n"); goto no_buffers; } } /* Refresh the desc even if buffer_addrs didn't change because * each write-back erases this info. */ if (adapter->rx_ps_hdr_size) { rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->page_dma); rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); } else { rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); rx_desc->read.hdr_addr = 0; } i++; if (i == rx_ring->count) i = 0; buffer_info = &rx_ring->buffer_info[i]; } no_buffers: if (rx_ring->next_to_use != i) { rx_ring->next_to_use = i; if (i == 0) i = (rx_ring->count - 1); else i--; /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). 
*/ wmb(); writel(i, adapter->hw.hw_addr + rx_ring->tail); } } /** * igbvf_clean_rx_irq - Send received data up the network stack; legacy * @adapter: board private structure * @work_done: output parameter used to indicate completed work * @work_to_do: input parameter setting limit of work * * the return value indicates whether actual cleaning was done, there * is no guarantee that everything was cleaned **/ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, int *work_done, int work_to_do) { struct igbvf_ring *rx_ring = adapter->rx_ring; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union e1000_adv_rx_desc *rx_desc, *next_rxd; struct igbvf_buffer *buffer_info, *next_buffer; struct sk_buff *skb; bool cleaned = false; int cleaned_count = 0; unsigned int total_bytes = 0, total_packets = 0; unsigned int i; u32 length, hlen, staterr; i = rx_ring->next_to_clean; rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); while (staterr & E1000_RXD_STAT_DD) { if (*work_done >= work_to_do) break; (*work_done)++; rmb(); /* read descriptor and rx_buffer_info after status DD */ buffer_info = &rx_ring->buffer_info[i]; /* HW will not DMA in data larger than the given buffer, even * if it parses the (NFS, of course) header to be larger. In * that case, it fills the header buffer and spills the rest * into the page. */ hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) & E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; if (hlen > adapter->rx_ps_hdr_size) hlen = adapter->rx_ps_hdr_size; length = le16_to_cpu(rx_desc->wb.upper.length); cleaned = true; cleaned_count++; skb = buffer_info->skb; prefetch(skb->data - NET_IP_ALIGN); buffer_info->skb = NULL; if (!adapter->rx_ps_hdr_size) { dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_buffer_len, DMA_FROM_DEVICE); buffer_info->dma = 0; skb_put(skb, length); goto send_up; } if (!skb_shinfo(skb)->nr_frags) { dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_ps_hdr_size, DMA_FROM_DEVICE); buffer_info->dma = 0; skb_put(skb, hlen); } if (length) { dma_unmap_page(&pdev->dev, buffer_info->page_dma, PAGE_SIZE / 2, DMA_FROM_DEVICE); buffer_info->page_dma = 0; skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, buffer_info->page, buffer_info->page_offset, length); if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || (page_count(buffer_info->page) != 1)) buffer_info->page = NULL; else get_page(buffer_info->page); skb->len += length; skb->data_len += length; skb->truesize += PAGE_SIZE / 2; } send_up: i++; if (i == rx_ring->count) i = 0; next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i); prefetch(next_rxd); next_buffer = &rx_ring->buffer_info[i]; if (!(staterr & E1000_RXD_STAT_EOP)) { buffer_info->skb = next_buffer->skb; buffer_info->dma = next_buffer->dma; next_buffer->skb = skb; next_buffer->dma = 0; goto next_desc; } if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { dev_kfree_skb_irq(skb); goto next_desc; } total_bytes += skb->len; total_packets++; igbvf_rx_checksum_adv(adapter, staterr, skb); skb->protocol = eth_type_trans(skb, netdev); igbvf_receive_skb(adapter, netdev, skb, staterr, rx_desc->wb.upper.vlan); next_desc: rx_desc->wb.upper.status_error = 0; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) { igbvf_alloc_rx_buffers(rx_ring, cleaned_count); cleaned_count = 0; } /* use prefetched values */ rx_desc = next_rxd; buffer_info = next_buffer; staterr = 
le32_to_cpu(rx_desc->wb.upper.status_error); } rx_ring->next_to_clean = i; cleaned_count = igbvf_desc_unused(rx_ring); if (cleaned_count) igbvf_alloc_rx_buffers(rx_ring, cleaned_count); adapter->total_rx_packets += total_packets; adapter->total_rx_bytes += total_bytes; netdev->stats.rx_bytes += total_bytes; netdev->stats.rx_packets += total_packets; return cleaned; } static void igbvf_put_txbuf(struct igbvf_adapter *adapter, struct igbvf_buffer *buffer_info) { if (buffer_info->dma) { if (buffer_info->mapped_as_page) dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, buffer_info->length, DMA_TO_DEVICE); else dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, buffer_info->length, DMA_TO_DEVICE); buffer_info->dma = 0; } if (buffer_info->skb) { dev_kfree_skb_any(buffer_info->skb); buffer_info->skb = NULL; } buffer_info->time_stamp = 0; } /** * igbvf_setup_tx_resources - allocate Tx resources (Descriptors) * @adapter: board private structure * @tx_ring: ring being initialized * * Return 0 on success, negative on failure **/ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring) { struct pci_dev *pdev = adapter->pdev; int size; size = sizeof(struct igbvf_buffer) * tx_ring->count; tx_ring->buffer_info = vzalloc(size); if (!tx_ring->buffer_info) goto err; /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) goto err; tx_ring->adapter = adapter; tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; return 0; err: vfree(tx_ring->buffer_info); dev_err(&adapter->pdev->dev, "Unable to allocate memory for the transmit descriptor ring\n"); return -ENOMEM; } /** * igbvf_setup_rx_resources - allocate Rx resources (Descriptors) * @adapter: board private structure * @rx_ring: ring being initialized * * Returns 0 on success, negative on failure **/ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter, struct igbvf_ring *rx_ring) { struct pci_dev *pdev = adapter->pdev; int size, desc_len; size = sizeof(struct igbvf_buffer) * rx_ring->count; rx_ring->buffer_info = vzalloc(size); if (!rx_ring->buffer_info) goto err; desc_len = sizeof(union e1000_adv_rx_desc); /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * desc_len; rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) goto err; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; rx_ring->adapter = adapter; return 0; err: vfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; dev_err(&adapter->pdev->dev, "Unable to allocate memory for the receive descriptor ring\n"); return -ENOMEM; } /** * igbvf_clean_tx_ring - Free Tx Buffers * @tx_ring: ring to be cleaned **/ static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring) { struct igbvf_adapter *adapter = tx_ring->adapter; struct igbvf_buffer *buffer_info; unsigned long size; unsigned int i; if (!tx_ring->buffer_info) return; /* Free all the Tx ring sk_buffs */ for (i = 0; i < tx_ring->count; i++) { buffer_info = &tx_ring->buffer_info[i]; igbvf_put_txbuf(adapter, buffer_info); } size = sizeof(struct igbvf_buffer) * tx_ring->count; memset(tx_ring->buffer_info, 0, size); /* Zero out the descriptor ring */ memset(tx_ring->desc, 0, tx_ring->size); tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; writel(0, adapter->hw.hw_addr + tx_ring->head); writel(0, 
adapter->hw.hw_addr + tx_ring->tail); } /** * igbvf_free_tx_resources - Free Tx Resources per Queue * @tx_ring: ring to free resources from * * Free all transmit software resources **/ void igbvf_free_tx_resources(struct igbvf_ring *tx_ring) { struct pci_dev *pdev = tx_ring->adapter->pdev; igbvf_clean_tx_ring(tx_ring); vfree(tx_ring->buffer_info); tx_ring->buffer_info = NULL; dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; } /** * igbvf_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring structure pointer to free buffers from **/ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) { struct igbvf_adapter *adapter = rx_ring->adapter; struct igbvf_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; unsigned long size; unsigned int i; if (!rx_ring->buffer_info) return; /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; if (buffer_info->dma) { if (adapter->rx_ps_hdr_size) { dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_ps_hdr_size, DMA_FROM_DEVICE); } else { dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_buffer_len, DMA_FROM_DEVICE); } buffer_info->dma = 0; } if (buffer_info->skb) { dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; } if (buffer_info->page) { if (buffer_info->page_dma) dma_unmap_page(&pdev->dev, buffer_info->page_dma, PAGE_SIZE / 2, DMA_FROM_DEVICE); put_page(buffer_info->page); buffer_info->page = NULL; buffer_info->page_dma = 0; buffer_info->page_offset = 0; } } size = sizeof(struct igbvf_buffer) * rx_ring->count; memset(rx_ring->buffer_info, 0, size); /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; writel(0, adapter->hw.hw_addr + rx_ring->head); writel(0, adapter->hw.hw_addr + rx_ring->tail); } /** * igbvf_free_rx_resources - Free Rx Resources * @rx_ring: ring to clean the resources from * * Free all receive software resources **/ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) { struct pci_dev *pdev = rx_ring->adapter->pdev; igbvf_clean_rx_ring(rx_ring); vfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } /** * igbvf_update_itr - update the dynamic ITR value based on statistics * @adapter: pointer to adapter * @itr_setting: current adapter->itr * @packets: the number of packets during this measurement interval * @bytes: the number of bytes during this measurement interval * * Stores a new ITR value based on packets and byte counts during the last * interrupt. The advantage of per interrupt computation is faster updates * and more accurate ITR for the current traffic pattern. Constants in this * function were computed based on theoretical maximum wire speed and thresholds * were set based on testing data as well as attempting to minimize response * time while increasing bulk throughput. 
**/ static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter, enum latency_range itr_setting, int packets, int bytes) { enum latency_range retval = itr_setting; if (packets == 0) goto update_itr_done; switch (itr_setting) { case lowest_latency: /* handle TSO and jumbo frames */ if (bytes/packets > 8000) retval = bulk_latency; else if ((packets < 5) && (bytes > 512)) retval = low_latency; break; case low_latency: /* 50 usec aka 20000 ints/s */ if (bytes > 10000) { /* this if handles the TSO accounting */ if (bytes/packets > 8000) retval = bulk_latency; else if ((packets < 10) || ((bytes/packets) > 1200)) retval = bulk_latency; else if ((packets > 35)) retval = lowest_latency; } else if (bytes/packets > 2000) { retval = bulk_latency; } else if (packets <= 2 && bytes < 512) { retval = lowest_latency; } break; case bulk_latency: /* 250 usec aka 4000 ints/s */ if (bytes > 25000) { if (packets > 35) retval = low_latency; } else if (bytes < 6000) { retval = low_latency; } break; default: break; } update_itr_done: return retval; } static int igbvf_range_to_itr(enum latency_range current_range) { int new_itr; switch (current_range) { /* counts and packets in update_itr are dependent on these numbers */ case lowest_latency: new_itr = IGBVF_70K_ITR; break; case low_latency: new_itr = IGBVF_20K_ITR; break; case bulk_latency: new_itr = IGBVF_4K_ITR; break; default: new_itr = IGBVF_START_ITR; break; } return new_itr; } static void igbvf_set_itr(struct igbvf_adapter *adapter) { u32 new_itr; adapter->tx_ring->itr_range = igbvf_update_itr(adapter, adapter->tx_ring->itr_val, adapter->total_tx_packets, adapter->total_tx_bytes); /* conservative mode (itr 3) eliminates the lowest_latency setting */ if (adapter->requested_itr == 3 && adapter->tx_ring->itr_range == lowest_latency) adapter->tx_ring->itr_range = low_latency; new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range); if (new_itr != adapter->tx_ring->itr_val) { u32 current_itr = adapter->tx_ring->itr_val; /* this attempts to bias the interrupt rate towards Bulk * by adding intermediate steps when interrupt rate is * increasing */ new_itr = new_itr > current_itr ? min(current_itr + (new_itr >> 2), new_itr) : new_itr; adapter->tx_ring->itr_val = new_itr; adapter->tx_ring->set_itr = 1; } adapter->rx_ring->itr_range = igbvf_update_itr(adapter, adapter->rx_ring->itr_val, adapter->total_rx_packets, adapter->total_rx_bytes); if (adapter->requested_itr == 3 && adapter->rx_ring->itr_range == lowest_latency) adapter->rx_ring->itr_range = low_latency; new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range); if (new_itr != adapter->rx_ring->itr_val) { u32 current_itr = adapter->rx_ring->itr_val; new_itr = new_itr > current_itr ? 
min(current_itr + (new_itr >> 2), new_itr) : new_itr; adapter->rx_ring->itr_val = new_itr; adapter->rx_ring->set_itr = 1; } } /** * igbvf_clean_tx_irq - Reclaim resources after transmit completes * @tx_ring: ring structure to clean descriptors from * * returns true if ring is completely cleaned **/ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) { struct igbvf_adapter *adapter = tx_ring->adapter; struct net_device *netdev = adapter->netdev; struct igbvf_buffer *buffer_info; struct sk_buff *skb; union e1000_adv_tx_desc *tx_desc, *eop_desc; unsigned int total_bytes = 0, total_packets = 0; unsigned int i, count = 0; bool cleaned = false; i = tx_ring->next_to_clean; buffer_info = &tx_ring->buffer_info[i]; eop_desc = buffer_info->next_to_watch; do { /* if next_to_watch is not set then there is no work pending */ if (!eop_desc) break; /* prevent any other reads prior to eop_desc */ smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) break; /* clear next_to_watch to prevent false hangs */ buffer_info->next_to_watch = NULL; for (cleaned = false; !cleaned; count++) { tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); cleaned = (tx_desc == eop_desc); skb = buffer_info->skb; if (skb) { unsigned int segs, bytecount; /* gso_segs is currently only valid for tcp */ segs = skb_shinfo(skb)->gso_segs ?: 1; /* multiply data chunks by size of headers */ bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; total_packets += segs; total_bytes += bytecount; } igbvf_put_txbuf(adapter, buffer_info); tx_desc->wb.status = 0; i++; if (i == tx_ring->count) i = 0; buffer_info = &tx_ring->buffer_info[i]; } eop_desc = buffer_info->next_to_watch; } while (count < tx_ring->count); tx_ring->next_to_clean = i; if (unlikely(count && netif_carrier_ok(netdev) && igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. 
*/ smp_mb(); if (netif_queue_stopped(netdev) && !(test_bit(__IGBVF_DOWN, &adapter->state))) { netif_wake_queue(netdev); ++adapter->restart_queue; } } netdev->stats.tx_bytes += total_bytes; netdev->stats.tx_packets += total_packets; return count < tx_ring->count; } static irqreturn_t igbvf_msix_other(int irq, void *data) { struct net_device *netdev = data; struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; adapter->int_counter1++; hw->mac.get_link_status = 1; if (!test_bit(__IGBVF_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); ew32(EIMS, adapter->eims_other); return IRQ_HANDLED; } static irqreturn_t igbvf_intr_msix_tx(int irq, void *data) { struct net_device *netdev = data; struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct igbvf_ring *tx_ring = adapter->tx_ring; if (tx_ring->set_itr) { writel(tx_ring->itr_val, adapter->hw.hw_addr + tx_ring->itr_register); adapter->tx_ring->set_itr = 0; } adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; /* auto mask will automatically re-enable the interrupt when we write * EICS */ if (!igbvf_clean_tx_irq(tx_ring)) /* Ring was not completely cleaned, so fire another interrupt */ ew32(EICS, tx_ring->eims_value); else ew32(EIMS, tx_ring->eims_value); return IRQ_HANDLED; } static irqreturn_t igbvf_intr_msix_rx(int irq, void *data) { struct net_device *netdev = data; struct igbvf_adapter *adapter = netdev_priv(netdev); adapter->int_counter0++; /* Write the ITR value calculated at the end of the * previous interrupt. */ if (adapter->rx_ring->set_itr) { writel(adapter->rx_ring->itr_val, adapter->hw.hw_addr + adapter->rx_ring->itr_register); adapter->rx_ring->set_itr = 0; } if (napi_schedule_prep(&adapter->rx_ring->napi)) { adapter->total_rx_bytes = 0; adapter->total_rx_packets = 0; __napi_schedule(&adapter->rx_ring->napi); } return IRQ_HANDLED; } #define IGBVF_NO_QUEUE -1 static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, int tx_queue, int msix_vector) { struct e1000_hw *hw = &adapter->hw; u32 ivar, index; /* 82576 uses a table-based method for assigning vectors. * Each queue has a single entry in the table to which we write * a vector number along with a "valid" bit. Sadly, the layout * of the table is somewhat counterintuitive. */ if (rx_queue > IGBVF_NO_QUEUE) { index = (rx_queue >> 1); ivar = array_er32(IVAR0, index); if (rx_queue & 0x1) { /* vector goes into third byte of register */ ivar = ivar & 0xFF00FFFF; ivar |= (msix_vector | E1000_IVAR_VALID) << 16; } else { /* vector goes into low byte of register */ ivar = ivar & 0xFFFFFF00; ivar |= msix_vector | E1000_IVAR_VALID; } adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector); array_ew32(IVAR0, index, ivar); } if (tx_queue > IGBVF_NO_QUEUE) { index = (tx_queue >> 1); ivar = array_er32(IVAR0, index); if (tx_queue & 0x1) { /* vector goes into high byte of register */ ivar = ivar & 0x00FFFFFF; ivar |= (msix_vector | E1000_IVAR_VALID) << 24; } else { /* vector goes into second byte of register */ ivar = ivar & 0xFFFF00FF; ivar |= (msix_vector | E1000_IVAR_VALID) << 8; } adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector); array_ew32(IVAR0, index, ivar); } } /** * igbvf_configure_msix - Configure MSI-X hardware * @adapter: board private structure * * igbvf_configure_msix sets up the hardware to properly * generate MSI-X interrupts. 
**/ static void igbvf_configure_msix(struct igbvf_adapter *adapter) { u32 tmp; struct e1000_hw *hw = &adapter->hw; struct igbvf_ring *tx_ring = adapter->tx_ring; struct igbvf_ring *rx_ring = adapter->rx_ring; int vector = 0; adapter->eims_enable_mask = 0; igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++); adapter->eims_enable_mask |= tx_ring->eims_value; writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register); igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++); adapter->eims_enable_mask |= rx_ring->eims_value; writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register); /* set vector for other causes, i.e. link changes */ tmp = (vector++ | E1000_IVAR_VALID); ew32(IVAR_MISC, tmp); adapter->eims_enable_mask = GENMASK(vector - 1, 0); adapter->eims_other = BIT(vector - 1); e1e_flush(); } static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter) { if (adapter->msix_entries) { pci_disable_msix(adapter->pdev); kfree(adapter->msix_entries); adapter->msix_entries = NULL; } } /** * igbvf_set_interrupt_capability - set MSI or MSI-X if supported * @adapter: board private structure * * Attempt to configure interrupts using the best available * capabilities of the hardware and kernel. **/ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) { int err = -ENOMEM; int i; /* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */ adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), GFP_KERNEL); if (adapter->msix_entries) { for (i = 0; i < 3; i++) adapter->msix_entries[i].entry = i; err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 3, 3); } if (err < 0) { /* MSI-X failed */ dev_err(&adapter->pdev->dev, "Failed to initialize MSI-X interrupts.\n"); igbvf_reset_interrupt_capability(adapter); } } /** * igbvf_request_msix - Initialize MSI-X interrupts * @adapter: board private structure * * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the * kernel. 
**/ static int igbvf_request_msix(struct igbvf_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err = 0, vector = 0; if (strlen(netdev->name) < (IFNAMSIZ - 5)) { sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); } else { memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); } err = request_irq(adapter->msix_entries[vector].vector, igbvf_intr_msix_tx, 0, adapter->tx_ring->name, netdev); if (err) goto out; adapter->tx_ring->itr_register = E1000_EITR(vector); adapter->tx_ring->itr_val = adapter->current_itr; vector++; err = request_irq(adapter->msix_entries[vector].vector, igbvf_intr_msix_rx, 0, adapter->rx_ring->name, netdev); if (err) goto free_irq_tx; adapter->rx_ring->itr_register = E1000_EITR(vector); adapter->rx_ring->itr_val = adapter->current_itr; vector++; err = request_irq(adapter->msix_entries[vector].vector, igbvf_msix_other, 0, netdev->name, netdev); if (err) goto free_irq_rx; igbvf_configure_msix(adapter); return 0; free_irq_rx: free_irq(adapter->msix_entries[--vector].vector, netdev); free_irq_tx: free_irq(adapter->msix_entries[--vector].vector, netdev); out: return err; } /** * igbvf_alloc_queues - Allocate memory for all rings * @adapter: board private structure to initialize **/ static int igbvf_alloc_queues(struct igbvf_adapter *adapter) { struct net_device *netdev = adapter->netdev; adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); if (!adapter->tx_ring) return -ENOMEM; adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); if (!adapter->rx_ring) { kfree(adapter->tx_ring); return -ENOMEM; } netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll); return 0; } /** * igbvf_request_irq - initialize interrupts * @adapter: board private structure * * Attempts to configure interrupts using the best available * capabilities of the hardware and kernel. 
**/ static int igbvf_request_irq(struct igbvf_adapter *adapter) { int err = -1; /* igbvf supports msi-x only */ if (adapter->msix_entries) err = igbvf_request_msix(adapter); if (!err) return err; dev_err(&adapter->pdev->dev, "Unable to allocate interrupt, Error: %d\n", err); return err; } static void igbvf_free_irq(struct igbvf_adapter *adapter) { struct net_device *netdev = adapter->netdev; int vector; if (adapter->msix_entries) { for (vector = 0; vector < 3; vector++) free_irq(adapter->msix_entries[vector].vector, netdev); } } /** * igbvf_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure **/ static void igbvf_irq_disable(struct igbvf_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; ew32(EIMC, ~0); if (adapter->msix_entries) ew32(EIAC, 0); } /** * igbvf_irq_enable - Enable default interrupt generation settings * @adapter: board private structure **/ static void igbvf_irq_enable(struct igbvf_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; ew32(EIAC, adapter->eims_enable_mask); ew32(EIAM, adapter->eims_enable_mask); ew32(EIMS, adapter->eims_enable_mask); } /** * igbvf_poll - NAPI Rx polling callback * @napi: struct associated with this polling callback * @budget: amount of packets driver is allowed to process this poll **/ static int igbvf_poll(struct napi_struct *napi, int budget) { struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi); struct igbvf_adapter *adapter = rx_ring->adapter; struct e1000_hw *hw = &adapter->hw; int work_done = 0; igbvf_clean_rx_irq(adapter, &work_done, budget); if (work_done == budget) return budget; /* Exit the polling mode, but don't re-enable interrupts if stack might * poll us due to busy-polling */ if (likely(napi_complete_done(napi, work_done))) { if (adapter->requested_itr & 3) igbvf_set_itr(adapter); if (!test_bit(__IGBVF_DOWN, &adapter->state)) ew32(EIMS, adapter->rx_ring->eims_value); } return work_done; } /** * igbvf_set_rlpml - set receive large packet maximum length * @adapter: board private structure * * Configure the maximum size of packets that will be received */ static void igbvf_set_rlpml(struct igbvf_adapter *adapter) { int max_frame_size; struct e1000_hw *hw = &adapter->hw; max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE; spin_lock_bh(&hw->mbx_lock); e1000_rlpml_set_vf(hw, max_frame_size); spin_unlock_bh(&hw->mbx_lock); } static int igbvf_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; spin_lock_bh(&hw->mbx_lock); if (hw->mac.ops.set_vfta(hw, vid, true)) { dev_warn(&adapter->pdev->dev, "Vlan id %d\n is not added", vid); spin_unlock_bh(&hw->mbx_lock); return -EINVAL; } spin_unlock_bh(&hw->mbx_lock); set_bit(vid, adapter->active_vlans); return 0; } static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; spin_lock_bh(&hw->mbx_lock); if (hw->mac.ops.set_vfta(hw, vid, false)) { dev_err(&adapter->pdev->dev, "Failed to remove vlan id %d\n", vid); spin_unlock_bh(&hw->mbx_lock); return -EINVAL; } spin_unlock_bh(&hw->mbx_lock); clear_bit(vid, adapter->active_vlans); return 0; } static void igbvf_restore_vlan(struct igbvf_adapter *adapter) { u16 vid; for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } /** * igbvf_configure_tx - Configure Transmit Unit after Reset * @adapter: 
board private structure * * Configure the Tx unit of the MAC after a reset. **/ static void igbvf_configure_tx(struct igbvf_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct igbvf_ring *tx_ring = adapter->tx_ring; u64 tdba; u32 txdctl, dca_txctrl; /* disable transmits */ txdctl = er32(TXDCTL(0)); ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); e1e_flush(); msleep(10); /* Setup the HW Tx Head and Tail descriptor pointers */ ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc)); tdba = tx_ring->dma; ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32))); ew32(TDBAH(0), (tdba >> 32)); ew32(TDH(0), 0); ew32(TDT(0), 0); tx_ring->head = E1000_TDH(0); tx_ring->tail = E1000_TDT(0); /* Turn off Relaxed Ordering on head write-backs. The writebacks * MUST be delivered in order or it will completely screw up * our bookkeeping. */ dca_txctrl = er32(DCA_TXCTRL(0)); dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; ew32(DCA_TXCTRL(0), dca_txctrl); /* enable transmits */ txdctl |= E1000_TXDCTL_QUEUE_ENABLE; ew32(TXDCTL(0), txdctl); /* Setup Transmit Descriptor Settings for eop descriptor */ adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS; /* enable Report Status bit */ adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS; } /** * igbvf_setup_srrctl - configure the receive control registers * @adapter: Board private structure **/ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 srrctl = 0; srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | E1000_SRRCTL_BSIZEHDR_MASK | E1000_SRRCTL_BSIZEPKT_MASK); /* Enable queue drop to avoid head of line blocking */ srrctl |= E1000_SRRCTL_DROP_EN; /* Setup buffer sizes */ srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> E1000_SRRCTL_BSIZEPKT_SHIFT; if (adapter->rx_buffer_len < 2048) { adapter->rx_ps_hdr_size = 0; srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; } else { adapter->rx_ps_hdr_size = 128; srrctl |= adapter->rx_ps_hdr_size << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; } ew32(SRRCTL(0), srrctl); } /** * igbvf_configure_rx - Configure Receive Unit after Reset * @adapter: board private structure * * Configure the Rx unit of the MAC after a reset. **/ static void igbvf_configure_rx(struct igbvf_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct igbvf_ring *rx_ring = adapter->rx_ring; u64 rdba; u32 rxdctl; /* disable receives */ rxdctl = er32(RXDCTL(0)); ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); e1e_flush(); msleep(10); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ rdba = rx_ring->dma; ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); ew32(RDBAH(0), (rdba >> 32)); ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc)); rx_ring->head = E1000_RDH(0); rx_ring->tail = E1000_RDT(0); ew32(RDH(0), 0); ew32(RDT(0), 0); rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; rxdctl &= 0xFFF00000; rxdctl |= IGBVF_RX_PTHRESH; rxdctl |= IGBVF_RX_HTHRESH << 8; rxdctl |= IGBVF_RX_WTHRESH << 16; igbvf_set_rlpml(adapter); /* enable receives */ ew32(RXDCTL(0), rxdctl); } /** * igbvf_set_multi - Multicast and Promiscuous mode set * @netdev: network interface device structure * * The set_multi entry point is called whenever the multicast address * list or the network interface flags are updated. This routine is * responsible for configuring the hardware for proper multicast, * promiscuous mode, and all-multi behavior. 
**/ static void igbvf_set_multi(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; u8 *mta_list = NULL; int i; if (!netdev_mc_empty(netdev)) { mta_list = kmalloc_array(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC); if (!mta_list) return; } /* prepare a packed array of only addresses. */ i = 0; netdev_for_each_mc_addr(ha, netdev) memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); spin_lock_bh(&hw->mbx_lock); hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); spin_unlock_bh(&hw->mbx_lock); kfree(mta_list); } /** * igbvf_set_uni - Configure unicast MAC filters * @netdev: network interface device structure * * This routine is responsible for configuring the hardware for proper * unicast filters. **/ static int igbvf_set_uni(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; if (netdev_uc_count(netdev) > IGBVF_MAX_MAC_FILTERS) { pr_err("Too many unicast filters - No Space\n"); return -ENOSPC; } spin_lock_bh(&hw->mbx_lock); /* Clear all unicast MAC filters */ hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_CLR, NULL); spin_unlock_bh(&hw->mbx_lock); if (!netdev_uc_empty(netdev)) { struct netdev_hw_addr *ha; /* Add MAC filters one by one */ netdev_for_each_uc_addr(ha, netdev) { spin_lock_bh(&hw->mbx_lock); hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_ADD, ha->addr); spin_unlock_bh(&hw->mbx_lock); udelay(200); } } return 0; } static void igbvf_set_rx_mode(struct net_device *netdev) { igbvf_set_multi(netdev); igbvf_set_uni(netdev); } /** * igbvf_configure - configure the hardware for Rx and Tx * @adapter: private board structure **/ static void igbvf_configure(struct igbvf_adapter *adapter) { igbvf_set_rx_mode(adapter->netdev); igbvf_restore_vlan(adapter); igbvf_configure_tx(adapter); igbvf_setup_srrctl(adapter); igbvf_configure_rx(adapter); igbvf_alloc_rx_buffers(adapter->rx_ring, igbvf_desc_unused(adapter->rx_ring)); } /* igbvf_reset - bring the hardware into a known good state * @adapter: private board structure * * This function boots the hardware and enables some settings that * require a configuration cycle of the hardware - those cannot be * set/changed during runtime. After reset the device needs to be * properly configured for Rx, Tx etc. */ static void igbvf_reset(struct igbvf_adapter *adapter) { struct e1000_mac_info *mac = &adapter->hw.mac; struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; spin_lock_bh(&hw->mbx_lock); /* Allow time for pending master requests to run */ if (mac->ops.reset_hw(hw)) dev_info(&adapter->pdev->dev, "PF still resetting\n"); mac->ops.init_hw(hw); spin_unlock_bh(&hw->mbx_lock); if (is_valid_ether_addr(adapter->hw.mac.addr)) { eth_hw_addr_set(netdev, adapter->hw.mac.addr); memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); } adapter->last_reset = jiffies; } int igbvf_up(struct igbvf_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; /* hardware has been reset, we need to reload some things */ igbvf_configure(adapter); clear_bit(__IGBVF_DOWN, &adapter->state); napi_enable(&adapter->rx_ring->napi); if (adapter->msix_entries) igbvf_configure_msix(adapter); /* Clear any pending interrupts. 
*/ er32(EICR); igbvf_irq_enable(adapter); /* start the watchdog */ hw->mac.get_link_status = 1; mod_timer(&adapter->watchdog_timer, jiffies + 1); return 0; } void igbvf_down(struct igbvf_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; u32 rxdctl, txdctl; /* signal that we're down so the interrupt handler does not * reschedule our watchdog timer */ set_bit(__IGBVF_DOWN, &adapter->state); /* disable receives in the hardware */ rxdctl = er32(RXDCTL(0)); ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); netif_carrier_off(netdev); netif_stop_queue(netdev); /* disable transmits in the hardware */ txdctl = er32(TXDCTL(0)); ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); /* flush both disables and wait for them to finish */ e1e_flush(); msleep(10); napi_disable(&adapter->rx_ring->napi); igbvf_irq_disable(adapter); del_timer_sync(&adapter->watchdog_timer); /* record the stats before reset*/ igbvf_update_stats(adapter); adapter->link_speed = 0; adapter->link_duplex = 0; igbvf_reset(adapter); igbvf_clean_tx_ring(adapter->tx_ring); igbvf_clean_rx_ring(adapter->rx_ring); } void igbvf_reinit_locked(struct igbvf_adapter *adapter) { might_sleep(); while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) usleep_range(1000, 2000); igbvf_down(adapter); igbvf_up(adapter); clear_bit(__IGBVF_RESETTING, &adapter->state); } /** * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter) * @adapter: board private structure to initialize * * igbvf_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). **/ static int igbvf_sw_init(struct igbvf_adapter *adapter) { struct net_device *netdev = adapter->netdev; s32 rc; adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; adapter->rx_ps_hdr_size = 0; adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; adapter->tx_int_delay = 8; adapter->tx_abs_int_delay = 32; adapter->rx_int_delay = 0; adapter->rx_abs_int_delay = 8; adapter->requested_itr = 3; adapter->current_itr = IGBVF_START_ITR; /* Set various function pointers */ adapter->ei->init_ops(&adapter->hw); rc = adapter->hw.mac.ops.init_params(&adapter->hw); if (rc) return rc; rc = adapter->hw.mbx.ops.init_params(&adapter->hw); if (rc) return rc; igbvf_set_interrupt_capability(adapter); if (igbvf_alloc_queues(adapter)) return -ENOMEM; spin_lock_init(&adapter->tx_queue_lock); /* Explicitly disable IRQ since the NIC can be in any state. 
*/ igbvf_irq_disable(adapter); spin_lock_init(&adapter->stats_lock); spin_lock_init(&adapter->hw.mbx_lock); set_bit(__IGBVF_DOWN, &adapter->state); return 0; } static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; adapter->stats.last_gprc = er32(VFGPRC); adapter->stats.last_gorc = er32(VFGORC); adapter->stats.last_gptc = er32(VFGPTC); adapter->stats.last_gotc = er32(VFGOTC); adapter->stats.last_mprc = er32(VFMPRC); adapter->stats.last_gotlbc = er32(VFGOTLBC); adapter->stats.last_gptlbc = er32(VFGPTLBC); adapter->stats.last_gorlbc = er32(VFGORLBC); adapter->stats.last_gprlbc = er32(VFGPRLBC); adapter->stats.base_gprc = er32(VFGPRC); adapter->stats.base_gorc = er32(VFGORC); adapter->stats.base_gptc = er32(VFGPTC); adapter->stats.base_gotc = er32(VFGOTC); adapter->stats.base_mprc = er32(VFMPRC); adapter->stats.base_gotlbc = er32(VFGOTLBC); adapter->stats.base_gptlbc = er32(VFGPTLBC); adapter->stats.base_gorlbc = er32(VFGORLBC); adapter->stats.base_gprlbc = er32(VFGPRLBC); } /** * igbvf_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. **/ static int igbvf_open(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int err; /* disallow open during test */ if (test_bit(__IGBVF_TESTING, &adapter->state)) return -EBUSY; /* allocate transmit descriptors */ err = igbvf_setup_tx_resources(adapter, adapter->tx_ring); if (err) goto err_setup_tx; /* allocate receive descriptors */ err = igbvf_setup_rx_resources(adapter, adapter->rx_ring); if (err) goto err_setup_rx; /* before we allocate an interrupt, we must be ready to handle it. * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt * as soon as we call pci_request_irq, so we have to setup our * clean_rx handler before we do so. */ igbvf_configure(adapter); err = igbvf_request_irq(adapter); if (err) goto err_req_irq; /* From here on the code is the same as igbvf_up() */ clear_bit(__IGBVF_DOWN, &adapter->state); napi_enable(&adapter->rx_ring->napi); /* clear any pending interrupts */ er32(EICR); igbvf_irq_enable(adapter); /* start the watchdog */ hw->mac.get_link_status = 1; mod_timer(&adapter->watchdog_timer, jiffies + 1); return 0; err_req_irq: igbvf_free_rx_resources(adapter->rx_ring); err_setup_rx: igbvf_free_tx_resources(adapter->tx_ring); err_setup_tx: igbvf_reset(adapter); return err; } /** * igbvf_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the drivers control, but * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. 
**/ static int igbvf_close(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); igbvf_down(adapter); igbvf_free_irq(adapter); igbvf_free_tx_resources(adapter->tx_ring); igbvf_free_rx_resources(adapter->rx_ring); return 0; } /** * igbvf_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure **/ static int igbvf_set_mac(struct net_device *netdev, void *p) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); spin_lock_bh(&hw->mbx_lock); hw->mac.ops.rar_set(hw, hw->mac.addr, 0); spin_unlock_bh(&hw->mbx_lock); if (!ether_addr_equal(addr->sa_data, hw->mac.addr)) return -EADDRNOTAVAIL; eth_hw_addr_set(netdev, addr->sa_data); return 0; } #define UPDATE_VF_COUNTER(reg, name) \ { \ u32 current_counter = er32(reg); \ if (current_counter < adapter->stats.last_##name) \ adapter->stats.name += 0x100000000LL; \ adapter->stats.last_##name = current_counter; \ adapter->stats.name &= 0xFFFFFFFF00000000LL; \ adapter->stats.name |= current_counter; \ } /** * igbvf_update_stats - Update the board statistics counters * @adapter: board private structure **/ void igbvf_update_stats(struct igbvf_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; /* Prevent stats update while adapter is being reset, link is down * or if the pci connection is down. */ if (adapter->link_speed == 0) return; if (test_bit(__IGBVF_RESETTING, &adapter->state)) return; if (pci_channel_offline(pdev)) return; UPDATE_VF_COUNTER(VFGPRC, gprc); UPDATE_VF_COUNTER(VFGORC, gorc); UPDATE_VF_COUNTER(VFGPTC, gptc); UPDATE_VF_COUNTER(VFGOTC, gotc); UPDATE_VF_COUNTER(VFMPRC, mprc); UPDATE_VF_COUNTER(VFGOTLBC, gotlbc); UPDATE_VF_COUNTER(VFGPTLBC, gptlbc); UPDATE_VF_COUNTER(VFGORLBC, gorlbc); UPDATE_VF_COUNTER(VFGPRLBC, gprlbc); /* Fill out the OS statistics structure */ adapter->netdev->stats.multicast = adapter->stats.mprc; } static void igbvf_print_link_info(struct igbvf_adapter *adapter) { dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n", adapter->link_speed, adapter->link_duplex == FULL_DUPLEX ? 
"Full" : "Half"); } static bool igbvf_has_link(struct igbvf_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; s32 ret_val = E1000_SUCCESS; bool link_active; /* If interface is down, stay link down */ if (test_bit(__IGBVF_DOWN, &adapter->state)) return false; spin_lock_bh(&hw->mbx_lock); ret_val = hw->mac.ops.check_for_link(hw); spin_unlock_bh(&hw->mbx_lock); link_active = !hw->mac.get_link_status; /* if check for link returns error we will need to reset */ if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ))) schedule_work(&adapter->reset_task); return link_active; } /** * igbvf_watchdog - Timer Call-back * @t: timer list pointer containing private struct **/ static void igbvf_watchdog(struct timer_list *t) { struct igbvf_adapter *adapter = from_timer(adapter, t, watchdog_timer); /* Do the rest outside of interrupt context */ schedule_work(&adapter->watchdog_task); } static void igbvf_watchdog_task(struct work_struct *work) { struct igbvf_adapter *adapter = container_of(work, struct igbvf_adapter, watchdog_task); struct net_device *netdev = adapter->netdev; struct e1000_mac_info *mac = &adapter->hw.mac; struct igbvf_ring *tx_ring = adapter->tx_ring; struct e1000_hw *hw = &adapter->hw; u32 link; int tx_pending = 0; link = igbvf_has_link(adapter); if (link) { if (!netif_carrier_ok(netdev)) { mac->ops.get_link_up_info(&adapter->hw, &adapter->link_speed, &adapter->link_duplex); igbvf_print_link_info(adapter); netif_carrier_on(netdev); netif_wake_queue(netdev); } } else { if (netif_carrier_ok(netdev)) { adapter->link_speed = 0; adapter->link_duplex = 0; dev_info(&adapter->pdev->dev, "Link is Down\n"); netif_carrier_off(netdev); netif_stop_queue(netdev); } } if (netif_carrier_ok(netdev)) { igbvf_update_stats(adapter); } else { tx_pending = (igbvf_desc_unused(tx_ring) + 1 < tx_ring->count); if (tx_pending) { /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going * to get done, so reset controller to flush Tx. * (Do the reset outside of interrupt context). */ adapter->tx_timeout_count++; schedule_work(&adapter->reset_task); } } /* Cause software interrupt to ensure Rx ring is cleaned */ ew32(EICS, adapter->rx_ring->eims_value); /* Reset the timer */ if (!test_bit(__IGBVF_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + (2 * HZ))); } #define IGBVF_TX_FLAGS_CSUM 0x00000001 #define IGBVF_TX_FLAGS_VLAN 0x00000002 #define IGBVF_TX_FLAGS_TSO 0x00000004 #define IGBVF_TX_FLAGS_IPV4 0x00000008 #define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 #define IGBVF_TX_FLAGS_VLAN_SHIFT 16 static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens, u32 type_tucmd, u32 mss_l4len_idx) { struct e1000_adv_tx_context_desc *context_desc; struct igbvf_buffer *buffer_info; u16 i = tx_ring->next_to_use; context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; i++; tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; /* set bits to identify this as an advanced context descriptor */ type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT; context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); context_desc->seqnum_seed = 0; context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); buffer_info->time_stamp = jiffies; buffer_info->dma = 0; } static int igbvf_tso(struct igbvf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) { u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; union { struct iphdr *v4; struct ipv6hdr *v6; unsigned char *hdr; } ip; union { struct tcphdr *tcp; unsigned char *hdr; } l4; u32 paylen, l4_offset; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; if (!skb_is_gso(skb)) return 0; err = skb_cow_head(skb, 0); if (err < 0) return err; ip.hdr = skb_network_header(skb); l4.hdr = skb_checksum_start(skb); /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; /* initialize outer IP header fields */ if (ip.v4->version == 4) { unsigned char *csum_start = skb_checksum_start(skb); unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); /* IP header will have to cancel out any data that * is not a part of the outer IP header */ ip.v4->check = csum_fold(csum_partial(trans_start, csum_start - trans_start, 0)); type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; ip.v4->tot_len = 0; } else { ip.v6->payload_len = 0; } /* determine offset of inner transport header */ l4_offset = l4.hdr - skb->data; /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); /* MSS L4LEN IDX */ mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; /* VLAN MACLEN IPLEN */ vlan_macip_lens = l4.hdr - ip.hdr; vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK; igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); return 1; } static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, __be16 protocol) { u32 vlan_macip_lens = 0; u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { csum_failed: if (!(tx_flags & IGBVF_TX_FLAGS_VLAN)) return false; goto no_csum; } switch (skb->csum_offset) { case offsetof(struct tcphdr, check): type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; fallthrough; case offsetof(struct udphdr, check): break; case offsetof(struct sctphdr, checksum): /* validate that this is actually an SCTP request */ if (skb_csum_is_sctp(skb)) { type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; break; } fallthrough; default: skb_checksum_help(skb); goto csum_failed; } vlan_macip_lens = skb_checksum_start_offset(skb) - skb_network_offset(skb); no_csum: vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK; igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); return true; } static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) { struct igbvf_adapter *adapter = netdev_priv(netdev); /* there is enough descriptors then we don't need to worry */ if (igbvf_desc_unused(adapter->tx_ring) >= size) return 0; netif_stop_queue(netdev); /* Herbert's original patch had: * smp_mb__after_netif_stop_queue(); * but since that doesn't exist yet, just open code it. 
*/ smp_mb(); /* We need to check again just in case room has been made available */ if (igbvf_desc_unused(adapter->tx_ring) < size) return -EBUSY; netif_wake_queue(netdev); ++adapter->restart_queue; return 0; } #define IGBVF_MAX_TXD_PWR 16 #define IGBVF_MAX_DATA_PER_TXD (1u << IGBVF_MAX_TXD_PWR) static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring, struct sk_buff *skb) { struct igbvf_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; unsigned int len = skb_headlen(skb); unsigned int count = 0, i; unsigned int f; i = tx_ring->next_to_use; buffer_info = &tx_ring->buffer_info[i]; BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); buffer_info->length = len; /* set time_stamp *before* dma to help avoid a possible race */ buffer_info->time_stamp = jiffies; buffer_info->mapped_as_page = false; buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { const skb_frag_t *frag; count++; i++; if (i == tx_ring->count) i = 0; frag = &skb_shinfo(skb)->frags[f]; len = skb_frag_size(frag); buffer_info = &tx_ring->buffer_info[i]; BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); buffer_info->length = len; buffer_info->time_stamp = jiffies; buffer_info->mapped_as_page = true; buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; } tx_ring->buffer_info[i].skb = skb; return ++count; dma_error: dev_err(&pdev->dev, "TX DMA map failed\n"); /* clear timestamp and dma mappings for failed buffer_info mapping */ buffer_info->dma = 0; buffer_info->time_stamp = 0; buffer_info->length = 0; buffer_info->mapped_as_page = false; if (count) count--; /* clear timestamp and dma mappings for remaining portion of packet */ while (count--) { if (i == 0) i += tx_ring->count; i--; buffer_info = &tx_ring->buffer_info[i]; igbvf_put_txbuf(adapter, buffer_info); } return 0; } static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring, int tx_flags, int count, unsigned int first, u32 paylen, u8 hdr_len) { union e1000_adv_tx_desc *tx_desc = NULL; struct igbvf_buffer *buffer_info; u32 olinfo_status = 0, cmd_type_len; unsigned int i; cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT); if (tx_flags & IGBVF_TX_FLAGS_VLAN) cmd_type_len |= E1000_ADVTXD_DCMD_VLE; if (tx_flags & IGBVF_TX_FLAGS_TSO) { cmd_type_len |= E1000_ADVTXD_DCMD_TSE; /* insert tcp checksum */ olinfo_status |= E1000_TXD_POPTS_TXSM << 8; /* insert ip checksum */ if (tx_flags & IGBVF_TX_FLAGS_IPV4) olinfo_status |= E1000_TXD_POPTS_IXSM << 8; } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) { olinfo_status |= E1000_TXD_POPTS_TXSM << 8; } olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); i = tx_ring->next_to_use; while (count--) { buffer_info = &tx_ring->buffer_info[i]; tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type_len | buffer_info->length); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); i++; if (i == tx_ring->count) i = 0; } tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). 
*/ wmb(); tx_ring->buffer_info[first].next_to_watch = tx_desc; tx_ring->next_to_use = i; writel(i, adapter->hw.hw_addr + tx_ring->tail); } static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, struct net_device *netdev, struct igbvf_ring *tx_ring) { struct igbvf_adapter *adapter = netdev_priv(netdev); unsigned int first, tx_flags = 0; u8 hdr_len = 0; int count = 0; int tso = 0; __be16 protocol = vlan_get_protocol(skb); if (test_bit(__IGBVF_DOWN, &adapter->state)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } if (skb->len <= 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } /* need: count + 4 desc gap to keep tail from touching * + 2 desc gap to keep tail from touching head, * + 1 desc for skb->data, * + 1 desc for context descriptor, * head, otherwise try next time */ if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { /* this is a hard error */ return NETDEV_TX_BUSY; } if (skb_vlan_tag_present(skb)) { tx_flags |= IGBVF_TX_FLAGS_VLAN; tx_flags |= (skb_vlan_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT); } if (protocol == htons(ETH_P_IP)) tx_flags |= IGBVF_TX_FLAGS_IPV4; first = tx_ring->next_to_use; tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len); if (unlikely(tso < 0)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } if (tso) tx_flags |= IGBVF_TX_FLAGS_TSO; else if (igbvf_tx_csum(tx_ring, skb, tx_flags, protocol) && (skb->ip_summed == CHECKSUM_PARTIAL)) tx_flags |= IGBVF_TX_FLAGS_CSUM; /* count reflects descriptors mapped, if 0 then mapping error * has occurred and we need to rewind the descriptor queue */ count = igbvf_tx_map_adv(adapter, tx_ring, skb); if (count) { igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count, first, skb->len, hdr_len); /* Make sure there is space in the ring for the next send. */ igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4); } else { dev_kfree_skb_any(skb); tx_ring->buffer_info[first].time_stamp = 0; tx_ring->next_to_use = first; } return NETDEV_TX_OK; } static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_ring *tx_ring; if (test_bit(__IGBVF_DOWN, &adapter->state)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } tx_ring = &adapter->tx_ring[0]; return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring); } /** * igbvf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * @txqueue: queue timing out (unused) **/ static void igbvf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) { struct igbvf_adapter *adapter = netdev_priv(netdev); /* Do the reset outside of interrupt context */ adapter->tx_timeout_count++; schedule_work(&adapter->reset_task); } static void igbvf_reset_task(struct work_struct *work) { struct igbvf_adapter *adapter; adapter = container_of(work, struct igbvf_adapter, reset_task); igbvf_reinit_locked(adapter); } /** * igbvf_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure **/ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) { struct igbvf_adapter *adapter = netdev_priv(netdev); int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) usleep_range(1000, 2000); /* igbvf_down has a dependency on max_frame_size */ adapter->max_frame_size = max_frame; if (netif_running(netdev)) igbvf_down(adapter); /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN * 
means we reserve 2 more, this pushes us to allocate from the next * larger slab size. * i.e. RXBUFFER_2048 --> size-4096 slab * However with the new *_jumbo_rx* routines, jumbo receives will use * fragmented skbs */ if (max_frame <= 1024) adapter->rx_buffer_len = 1024; else if (max_frame <= 2048) adapter->rx_buffer_len = 2048; else #if (PAGE_SIZE / 2) > 16384 adapter->rx_buffer_len = 16384; #else adapter->rx_buffer_len = PAGE_SIZE / 2; #endif /* adjust allocation if LPE protects us, and we aren't using SBP */ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) igbvf_up(adapter); else igbvf_reset(adapter); clear_bit(__IGBVF_RESETTING, &adapter->state); return 0; } static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { default: return -EOPNOTSUPP; } } static int igbvf_suspend(struct device *dev_d) { struct net_device *netdev = dev_get_drvdata(dev_d); struct igbvf_adapter *adapter = netdev_priv(netdev); netif_device_detach(netdev); if (netif_running(netdev)) { WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); igbvf_down(adapter); igbvf_free_irq(adapter); } return 0; } static int __maybe_unused igbvf_resume(struct device *dev_d) { struct pci_dev *pdev = to_pci_dev(dev_d); struct net_device *netdev = pci_get_drvdata(pdev); struct igbvf_adapter *adapter = netdev_priv(netdev); u32 err; pci_set_master(pdev); if (netif_running(netdev)) { err = igbvf_request_irq(adapter); if (err) return err; } igbvf_reset(adapter); if (netif_running(netdev)) igbvf_up(adapter); netif_device_attach(netdev); return 0; } static void igbvf_shutdown(struct pci_dev *pdev) { igbvf_suspend(&pdev->dev); } #ifdef CONFIG_NET_POLL_CONTROLLER /* Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing. */ static void igbvf_netpoll(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); disable_irq(adapter->pdev->irq); igbvf_clean_tx_irq(adapter->tx_ring); enable_irq(adapter->pdev->irq); } #endif /** * igbvf_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device * @state: The current pci connection state * * This function is called after a PCI bus error affecting * this device has been detected. */ static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct igbvf_adapter *adapter = netdev_priv(netdev); netif_device_detach(netdev); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; if (netif_running(netdev)) igbvf_down(adapter); pci_disable_device(pdev); /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } /** * igbvf_io_slot_reset - called after the pci bus has been reset. * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. Implementation * resembles the first-half of the igbvf_resume routine. 
*/ static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igbvf_adapter *adapter = netdev_priv(netdev); if (pci_enable_device_mem(pdev)) { dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); igbvf_reset(adapter); return PCI_ERS_RESULT_RECOVERED; } /** * igbvf_io_resume - called when traffic can start flowing again. * @pdev: Pointer to PCI device * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the * second-half of the igbvf_resume routine. */ static void igbvf_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igbvf_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) { if (igbvf_up(adapter)) { dev_err(&pdev->dev, "can't bring device back up after reset\n"); return; } } netif_device_attach(netdev); } /** * igbvf_io_prepare - prepare device driver for PCI reset * @pdev: PCI device information struct */ static void igbvf_io_prepare(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igbvf_adapter *adapter = netdev_priv(netdev); while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) usleep_range(1000, 2000); igbvf_down(adapter); } /** * igbvf_io_reset_done - PCI reset done, device driver reset can begin * @pdev: PCI device information struct */ static void igbvf_io_reset_done(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igbvf_adapter *adapter = netdev_priv(netdev); igbvf_up(adapter); clear_bit(__IGBVF_RESETTING, &adapter->state); } static void igbvf_print_device_info(struct igbvf_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; if (hw->mac.type == e1000_vfadapt_i350) dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n"); else dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n"); dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr); } static int igbvf_set_features(struct net_device *netdev, netdev_features_t features) { struct igbvf_adapter *adapter = netdev_priv(netdev); if (features & NETIF_F_RXCSUM) adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED; else adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED; return 0; } #define IGBVF_MAX_MAC_HDR_LEN 127 #define IGBVF_MAX_NETWORK_HDR_LEN 511 static netdev_features_t igbvf_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { unsigned int network_hdr_len, mac_hdr_len; /* Make certain the headers can be described by a context descriptor */ mac_hdr_len = skb_network_header(skb) - skb->data; if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN)) return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_TSO | NETIF_F_TSO6); network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); if (unlikely(network_hdr_len > IGBVF_MAX_NETWORK_HDR_LEN)) return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | NETIF_F_TSO | NETIF_F_TSO6); /* We can only support IPV4 TSO in tunnels if we can mangle the * inner IP ID field, so strip TSO if MANGLEID is not supported. 
*/ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) features &= ~NETIF_F_TSO; return features; } static const struct net_device_ops igbvf_netdev_ops = { .ndo_open = igbvf_open, .ndo_stop = igbvf_close, .ndo_start_xmit = igbvf_xmit_frame, .ndo_set_rx_mode = igbvf_set_rx_mode, .ndo_set_mac_address = igbvf_set_mac, .ndo_change_mtu = igbvf_change_mtu, .ndo_eth_ioctl = igbvf_ioctl, .ndo_tx_timeout = igbvf_tx_timeout, .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = igbvf_netpoll, #endif .ndo_set_features = igbvf_set_features, .ndo_features_check = igbvf_features_check, }; /** * igbvf_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in igbvf_pci_tbl * * Returns 0 on success, negative on failure * * igbvf_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. **/ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct igbvf_adapter *adapter; struct e1000_hw *hw; const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data]; static int cards_found; int err; err = pci_enable_device_mem(pdev); if (err) return err; err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_dma; } err = pci_request_regions(pdev, igbvf_driver_name); if (err) goto err_pci_reg; pci_set_master(pdev); err = -ENOMEM; netdev = alloc_etherdev(sizeof(struct igbvf_adapter)); if (!netdev) goto err_alloc_etherdev; SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); adapter = netdev_priv(netdev); hw = &adapter->hw; adapter->netdev = netdev; adapter->pdev = pdev; adapter->ei = ei; adapter->pba = ei->pba; adapter->flags = ei->flags; adapter->hw.back = adapter; adapter->hw.mac.type = ei->mac; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); /* PCI config space info */ hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; hw->revision_id = pdev->revision; err = -EIO; adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!adapter->hw.hw_addr) goto err_ioremap; if (ei->get_variants) { err = ei->get_variants(adapter); if (err) goto err_get_variants; } /* setup adapter struct */ err = igbvf_sw_init(adapter); if (err) goto err_sw_init; /* construct the net_device struct */ netdev->netdev_ops = &igbvf_netdev_ops; igbvf_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); adapter->bd_number = cards_found++; netdev->hw_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC; #define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ NETIF_F_GSO_GRE_CSUM | \ NETIF_F_GSO_IPXIP4 | \ NETIF_F_GSO_IPXIP6 | \ NETIF_F_GSO_UDP_TUNNEL | \ NETIF_F_GSO_UDP_TUNNEL_CSUM) netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES; netdev->hw_features |= NETIF_F_GSO_PARTIAL | IGBVF_GSO_PARTIAL_FEATURES; netdev->features = netdev->hw_features | NETIF_F_HIGHDMA; netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; netdev->mpls_features |= NETIF_F_HW_CSUM; netdev->hw_enc_features |= netdev->vlan_features; /* set this bit last since it cannot be part of 
vlan_features */ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; /* MTU range: 68 - 9216 */ netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; spin_lock_bh(&hw->mbx_lock); /*reset the controller to put the device in a known good state */ err = hw->mac.ops.reset_hw(hw); if (err) { dev_info(&pdev->dev, "PF still in reset state. Is the PF interface up?\n"); } else { err = hw->mac.ops.read_mac_addr(hw); if (err) dev_info(&pdev->dev, "Error reading MAC address.\n"); else if (is_zero_ether_addr(adapter->hw.mac.addr)) dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); eth_hw_addr_set(netdev, adapter->hw.mac.addr); } spin_unlock_bh(&hw->mbx_lock); if (!is_valid_ether_addr(netdev->dev_addr)) { dev_info(&pdev->dev, "Assigning random MAC address.\n"); eth_hw_addr_random(netdev); memcpy(adapter->hw.mac.addr, netdev->dev_addr, netdev->addr_len); } timer_setup(&adapter->watchdog_timer, igbvf_watchdog, 0); INIT_WORK(&adapter->reset_task, igbvf_reset_task); INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); /* ring size defaults */ adapter->rx_ring->count = 1024; adapter->tx_ring->count = 1024; /* reset the hardware with the new settings */ igbvf_reset(adapter); /* set hardware-specific flags */ if (adapter->hw.mac.type == e1000_vfadapt_i350) adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP; strcpy(netdev->name, "eth%d"); err = register_netdev(netdev); if (err) goto err_hw_init; /* tell the stack to leave us alone until igbvf_open() is called */ netif_carrier_off(netdev); netif_stop_queue(netdev); igbvf_print_device_info(adapter); igbvf_initialize_last_counter_stats(adapter); return 0; err_hw_init: netif_napi_del(&adapter->rx_ring->napi); kfree(adapter->tx_ring); kfree(adapter->rx_ring); err_sw_init: igbvf_reset_interrupt_capability(adapter); err_get_variants: iounmap(adapter->hw.hw_addr); err_ioremap: free_netdev(netdev); err_alloc_etherdev: pci_release_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); return err; } /** * igbvf_remove - Device Removal Routine * @pdev: PCI device information struct * * igbvf_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. The could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. **/ static void igbvf_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; /* The watchdog timer may be rescheduled, so explicitly * disable it from being rescheduled. 
*/ set_bit(__IGBVF_DOWN, &adapter->state); del_timer_sync(&adapter->watchdog_timer); cancel_work_sync(&adapter->reset_task); cancel_work_sync(&adapter->watchdog_task); unregister_netdev(netdev); igbvf_reset_interrupt_capability(adapter); /* it is important to delete the NAPI struct prior to freeing the * Rx ring so that you do not end up with null pointer refs */ netif_napi_del(&adapter->rx_ring->napi); kfree(adapter->tx_ring); kfree(adapter->rx_ring); iounmap(hw->hw_addr); if (hw->flash_address) iounmap(hw->flash_address); pci_release_regions(pdev); free_netdev(netdev); pci_disable_device(pdev); } /* PCI Error Recovery (ERS) */ static const struct pci_error_handlers igbvf_err_handler = { .error_detected = igbvf_io_error_detected, .slot_reset = igbvf_io_slot_reset, .resume = igbvf_io_resume, .reset_prepare = igbvf_io_prepare, .reset_done = igbvf_io_reset_done, }; static const struct pci_device_id igbvf_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf }, { } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); static SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume); /* PCI Device API Driver */ static struct pci_driver igbvf_driver = { .name = igbvf_driver_name, .id_table = igbvf_pci_tbl, .probe = igbvf_probe, .remove = igbvf_remove, .driver.pm = &igbvf_pm_ops, .shutdown = igbvf_shutdown, .err_handler = &igbvf_err_handler }; /** * igbvf_init_module - Driver Registration Routine * * igbvf_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. **/ static int __init igbvf_init_module(void) { int ret; pr_info("%s\n", igbvf_driver_string); pr_info("%s\n", igbvf_copyright); ret = pci_register_driver(&igbvf_driver); return ret; } module_init(igbvf_init_module); /** * igbvf_exit_module - Driver Exit Cleanup Routine * * igbvf_exit_module is called just before the driver is removed * from memory. **/ static void __exit igbvf_exit_module(void) { pci_unregister_driver(&igbvf_driver); } module_exit(igbvf_exit_module); MODULE_AUTHOR("Intel Corporation, <[email protected]>"); MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); MODULE_LICENSE("GPL v2"); /* netdev.c */
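/*
 * Illustrative aside (not part of netdev.c above): the UPDATE_VF_COUNTER
 * macro earlier in this file folds 32-bit hardware counters into 64-bit
 * software statistics, adding 2^32 whenever a fresh reading is smaller than
 * the previous one (i.e. the register wrapped). A minimal, self-contained
 * user-space sketch of the same technique follows; struct vf_stat and
 * vf_stat_update() are hypothetical names used only for this example.
 */
#include <stdint.h>

struct vf_stat {
	uint64_t total;	/* accumulated 64-bit value reported to the stack */
	uint32_t last;	/* last raw 32-bit reading taken from the register */
};

static inline void vf_stat_update(struct vf_stat *s, uint32_t current_counter)
{
	/* A reading below the previous one means the 32-bit register wrapped,
	 * so credit one full 2^32 period to the running total.
	 */
	if (current_counter < s->last)
		s->total += 0x100000000ULL;
	s->last = current_counter;

	/* Keep the accumulated high 32 bits, splice in the new low 32 bits. */
	s->total = (s->total & 0xFFFFFFFF00000000ULL) | current_counter;
}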
linux-master
drivers/net/ethernet/intel/igbvf/netdev.c
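/*
 * Illustrative aside for the igbvf file above: igbvf_maybe_stop_tx() uses
 * the classic "stop the queue, full memory barrier, re-check free space,
 * maybe wake" sequence so a producer stopping a nearly full ring cannot race
 * with the completion path that frees descriptors. Below is a hedged,
 * self-contained C11 sketch of that ordering; struct demo_ring,
 * demo_maybe_stop() and the atomic fields are hypothetical stand-ins for the
 * driver's Tx ring and the netdev queue helpers.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct demo_ring {
	atomic_int  unused;	/* free descriptors; producer decrements,
				 * completion path increments and may wake */
	atomic_bool stopped;	/* producer-side "queue stopped" flag */
};

/* Return 0 when 'needed' descriptors are available, -1 ("busy") otherwise. */
static int demo_maybe_stop(struct demo_ring *r, int needed)
{
	if (atomic_load(&r->unused) >= needed)
		return 0;

	/* Stop first, then re-check. The full fence orders the store of
	 * 'stopped' before the second read of 'unused', mirroring the
	 * open-coded smp_mb() in igbvf_maybe_stop_tx().
	 */
	atomic_store(&r->stopped, true);
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load(&r->unused) < needed)
		return -1;

	/* Space was freed between the two checks; undo the stop and go on. */
	atomic_store(&r->stopped, false);
	return 0;
}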
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ /****************************************************************************** Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code ******************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/bitops.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/string.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/sctp.h> #include <linux/ipv6.h> #include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <linux/ethtool.h> #include <linux/if.h> #include <linux/if_vlan.h> #include <linux/prefetch.h> #include <net/mpls.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/atomic.h> #include <net/xfrm.h> #include "ixgbevf.h" const char ixgbevf_driver_name[] = "ixgbevf"; static const char ixgbevf_driver_string[] = "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; static char ixgbevf_copyright[] = "Copyright (c) 2009 - 2018 Intel Corporation."; static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info, [board_X540_vf] = &ixgbevf_X540_vf_info, [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info, [board_X550_vf] = &ixgbevf_X550_vf_info, [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info, [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info, [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info, }; /* ixgbevf_pci_tbl - PCI Device ID Table * * Wildcard entries (PCI_ANY_ID) should come last * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ static const struct pci_device_id ixgbevf_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf }, /* required last entry */ {0, } }; MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); MODULE_AUTHOR("Intel Corporation, <[email protected]>"); MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver"); MODULE_LICENSE("GPL v2"); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); static struct workqueue_struct *ixgbevf_wq; static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter) { if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && !test_bit(__IXGBEVF_REMOVING, &adapter->state) && !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)) queue_work(ixgbevf_wq, &adapter->service_task); } static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter) { BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)); /* flush memory to make sure state is correct before next watchdog */ 
smp_mb__before_atomic(); clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); } /* forward decls */ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter); static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer); static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *old_buff); static void ixgbevf_remove_adapter(struct ixgbe_hw *hw) { struct ixgbevf_adapter *adapter = hw->back; if (!hw->hw_addr) return; hw->hw_addr = NULL; dev_err(&adapter->pdev->dev, "Adapter removed\n"); if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) ixgbevf_service_event_schedule(adapter); } static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) { u32 value; /* The following check not only optimizes a bit by not * performing a read on the status register when the * register just read was a status register read that * returned IXGBE_FAILED_READ_REG. It also blocks any * potential recursion. */ if (reg == IXGBE_VFSTATUS) { ixgbevf_remove_adapter(hw); return; } value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS); if (value == IXGBE_FAILED_READ_REG) ixgbevf_remove_adapter(hw); } u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) { u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); u32 value; if (IXGBE_REMOVED(reg_addr)) return IXGBE_FAILED_READ_REG; value = readl(reg_addr + reg); if (unlikely(value == IXGBE_FAILED_READ_REG)) ixgbevf_check_remove(hw, reg); return value; } /** * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors * @adapter: pointer to adapter struct * @direction: 0 for Rx, 1 for Tx, -1 for other causes * @queue: queue to map the corresponding interrupt to * @msix_vector: the vector to map to the corresponding queue **/ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, u8 queue, u8 msix_vector) { u32 ivar, index; struct ixgbe_hw *hw = &adapter->hw; if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); ivar &= ~0xFF; ivar |= msix_vector; IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); } else { /* Tx or Rx causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; index = ((16 * (queue & 1)) + (8 * direction)); ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); ivar &= ~(0xFF << index); ivar |= (msix_vector << index); IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); } } static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring) { return ring->stats.packets; } static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring) { struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev); struct ixgbe_hw *hw = &adapter->hw; u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx)); u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx)); if (head != tail) return (head < tail) ? tail - head : (tail + ring->count - head); return 0; } static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring) { u32 tx_done = ixgbevf_get_tx_completed(tx_ring); u32 tx_done_old = tx_ring->tx_stats.tx_done_old; u32 tx_pending = ixgbevf_get_tx_pending(tx_ring); clear_check_for_tx_hang(tx_ring); /* Check for a hung queue, but be thorough. This verifies * that a transmit has been completed since the previous * check AND there is at least one packet pending. The * ARMED bit is set to indicate a potential hang. 
*/ if ((tx_done_old == tx_done) && tx_pending) { /* make sure it is true for two checks in a row */ return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state); } /* reset the countdown */ clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state); /* update completed stats and continue */ tx_ring->tx_stats.tx_done_old = tx_done; return false; } static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter) { /* Do the reset outside of interrupt context */ if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state); ixgbevf_service_event_schedule(adapter); } } /** * ixgbevf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * @txqueue: transmit queue hanging (unused) **/ static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); ixgbevf_tx_timeout_reset(adapter); } /** * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: board private structure * @tx_ring: tx ring to clean * @napi_budget: Used to determine if we are in netpoll **/ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring *tx_ring, int napi_budget) { struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbevf_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0; unsigned int budget = tx_ring->count / 2; unsigned int i = tx_ring->next_to_clean; if (test_bit(__IXGBEVF_DOWN, &adapter->state)) return true; tx_buffer = &tx_ring->tx_buffer_info[i]; tx_desc = IXGBEVF_TX_DESC(tx_ring, i); i -= tx_ring->count; do { union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; /* if next_to_watch is not set then there is no work pending */ if (!eop_desc) break; /* prevent any other reads prior to eop_desc */ smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) break; /* clear next_to_watch to prevent false hangs */ tx_buffer->next_to_watch = NULL; /* update the statistics for this packet */ total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) total_ipsec++; /* free the skb */ if (ring_is_xdp(tx_ring)) page_frag_free(tx_buffer->data); else napi_consume_skb(tx_buffer->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); /* clear tx_buffer data */ dma_unmap_len_set(tx_buffer, len, 0); /* unmap remaining buffers */ while (tx_desc != eop_desc) { tx_buffer++; tx_desc++; i++; if (unlikely(!i)) { i -= tx_ring->count; tx_buffer = tx_ring->tx_buffer_info; tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); } /* unmap any remaining paged data */ if (dma_unmap_len(tx_buffer, len)) { dma_unmap_page(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); dma_unmap_len_set(tx_buffer, len, 0); } } /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; tx_desc++; i++; if (unlikely(!i)) { i -= tx_ring->count; tx_buffer = tx_ring->tx_buffer_info; tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); } /* issue prefetch for next Tx descriptor */ prefetch(tx_desc); /* update budget accounting */ budget--; } while (likely(budget)); i += tx_ring->count; tx_ring->next_to_clean = i; u64_stats_update_begin(&tx_ring->syncp); tx_ring->stats.bytes += total_bytes; 
tx_ring->stats.packets += total_packets; u64_stats_update_end(&tx_ring->syncp); q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; adapter->tx_ipsec += total_ipsec; if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) { struct ixgbe_hw *hw = &adapter->hw; union ixgbe_adv_tx_desc *eop_desc; eop_desc = tx_ring->tx_buffer_info[i].next_to_watch; pr_err("Detected Tx Unit Hang%s\n" " Tx Queue <%d>\n" " TDH, TDT <%x>, <%x>\n" " next_to_use <%x>\n" " next_to_clean <%x>\n" "tx_buffer_info[next_to_clean]\n" " next_to_watch <%p>\n" " eop_desc->wb.status <%x>\n" " time_stamp <%lx>\n" " jiffies <%lx>\n", ring_is_xdp(tx_ring) ? " XDP" : "", tx_ring->queue_index, IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)), IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)), tx_ring->next_to_use, i, eop_desc, (eop_desc ? eop_desc->wb.status : 0), tx_ring->tx_buffer_info[i].time_stamp, jiffies); if (!ring_is_xdp(tx_ring)) netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); /* schedule immediate reset if we believe we hung */ ixgbevf_tx_timeout_reset(adapter); return true; } if (ring_is_xdp(tx_ring)) return !!budget; #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && !test_bit(__IXGBEVF_DOWN, &adapter->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; } } return !!budget; } /** * ixgbevf_rx_skb - Helper function to determine proper Rx method * @q_vector: structure containing interrupt and ring information * @skb: packet to send up **/ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, struct sk_buff *skb) { napi_gro_receive(&q_vector->napi, skb); } #define IXGBE_RSS_L4_TYPES_MASK \ ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { u16 rss_type; if (!(ring->netdev->features & NETIF_F_RXHASH)) return; rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & IXGBE_RXDADV_RSSTYPE_MASK; if (!rss_type) return; skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } /** * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: structure containig ring specific data * @rx_desc: current Rx descriptor being processed * @skb: skb currently being received and modified **/ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { skb_checksum_none_assert(skb); /* Rx csum disabled */ if (!(ring->netdev->features & NETIF_F_RXCSUM)) return; /* if IP and error */ if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { ring->rx_stats.csum_err++; return; } if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS)) return; if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { ring->rx_stats.csum_err++; return; } /* It must be a TCP or UDP packet with a valid checksum */ skb->ip_summed = CHECKSUM_UNNECESSARY; } /** * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated * * This function checks the ring, descriptor, and packet information in * order to populate the checksum, VLAN, protocol, and other fields within * the skb. **/ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { ixgbevf_rx_hash(rx_ring, rx_desc, skb); ixgbevf_rx_checksum(rx_ring, rx_desc, skb); if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); unsigned long *active_vlans = netdev_priv(rx_ring->netdev); if (test_bit(vid & VLAN_VID_MASK, active_vlans)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP)) ixgbevf_ipsec_rx(rx_ring, rx_desc, skb); skb->protocol = eth_type_trans(skb, rx_ring->netdev); } static struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring, const unsigned int size) { struct ixgbevf_rx_buffer *rx_buffer; rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; prefetchw(rx_buffer->page); /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, size, DMA_FROM_DEVICE); rx_buffer->pagecnt_bias--; return rx_buffer; } static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *rx_buffer, struct sk_buff *skb) { if (ixgbevf_can_reuse_rx_page(rx_buffer)) { /* hand second half of page back to the ring */ ixgbevf_reuse_rx_page(rx_ring, rx_buffer); } else { if (IS_ERR(skb)) /* We are not reusing the buffer so unmap it and free * any references we are holding to it */ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, ixgbevf_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR); __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); } /* clear contents of rx_buffer */ rx_buffer->page = NULL; } /** * ixgbevf_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed * @rx_desc: Rx descriptor for current buffer * * This function updates next to clean. If the buffer is an EOP buffer * this function exits returning false, otherwise it will place the * sk_buff in the next buffer to be chained and return true indicating * that this is in fact a non-EOP buffer. 
**/ static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc) { u32 ntc = rx_ring->next_to_clean + 1; /* fetch, update, and store next to clean */ ntc = (ntc < rx_ring->count) ? ntc : 0; rx_ring->next_to_clean = ntc; prefetch(IXGBEVF_RX_DESC(rx_ring, ntc)); if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) return false; return true; } static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring) { return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0; } static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *bi) { struct page *page = bi->page; dma_addr_t dma; /* since we are recycling buffers we should seldom need to alloc */ if (likely(page)) return true; /* alloc new page for storage */ page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_rx_page_failed++; return false; } /* map page for use */ dma = dma_map_page_attrs(rx_ring->dev, page, 0, ixgbevf_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use */ if (dma_mapping_error(rx_ring->dev, dma)) { __free_pages(page, ixgbevf_rx_pg_order(rx_ring)); rx_ring->rx_stats.alloc_rx_page_failed++; return false; } bi->dma = dma; bi->page = page; bi->page_offset = ixgbevf_rx_offset(rx_ring); bi->pagecnt_bias = 1; rx_ring->rx_stats.alloc_rx_page++; return true; } /** * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on * @cleaned_count: number of buffers to replace **/ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, u16 cleaned_count) { union ixgbe_adv_rx_desc *rx_desc; struct ixgbevf_rx_buffer *bi; unsigned int i = rx_ring->next_to_use; /* nothing to do or no valid netdev defined */ if (!cleaned_count || !rx_ring->netdev) return; rx_desc = IXGBEVF_RX_DESC(rx_ring, i); bi = &rx_ring->rx_buffer_info[i]; i -= rx_ring->count; do { if (!ixgbevf_alloc_mapped_page(rx_ring, bi)) break; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, bi->dma, bi->page_offset, ixgbevf_rx_bufsz(rx_ring), DMA_FROM_DEVICE); /* Refresh the desc even if pkt_addr didn't change * because each write-back erases this info. */ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); rx_desc++; bi++; i++; if (unlikely(!i)) { rx_desc = IXGBEVF_RX_DESC(rx_ring, 0); bi = rx_ring->rx_buffer_info; i -= rx_ring->count; } /* clear the length for the next_to_use descriptor */ rx_desc->wb.upper.length = 0; cleaned_count--; } while (cleaned_count); i += rx_ring->count; if (rx_ring->next_to_use != i) { /* record the next descriptor to use */ rx_ring->next_to_use = i; /* update next to alloc since we have filled the ring */ rx_ring->next_to_alloc = i; /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). */ wmb(); ixgbevf_write_tail(rx_ring, i); } } /** * ixgbevf_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being fixed * * Check for corrupted packet headers caused by senders on the local L2 * embedded NIC switch not setting up their Tx Descriptors right. These * should be very rare. 
* * Also address the case where we are pulling data in on pages only * and as such no data is present in the skb header. * * In addition if skb is not at least 60 bytes we need to pad it so that * it is large enough to qualify as a valid Ethernet frame. * * Returns true if an error was encountered and skb was freed. **/ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { /* XDP packets use error pointer so abort at this point */ if (IS_ERR(skb)) return true; /* verify that the packet does not have any known errors */ if (unlikely(ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { struct net_device *netdev = rx_ring->netdev; if (!(netdev->features & NETIF_F_RXALL)) { dev_kfree_skb_any(skb); return true; } } /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; return false; } /** * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: rx descriptor ring to store buffers on * @old_buff: donor buffer to have page reused * * Synchronizes page for reuse by the adapter **/ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *old_buff) { struct ixgbevf_rx_buffer *new_buff; u16 nta = rx_ring->next_to_alloc; new_buff = &rx_ring->rx_buffer_info[nta]; /* update, and store next to alloc */ nta++; rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; /* transfer page from old buffer to new buffer */ new_buff->page = old_buff->page; new_buff->dma = old_buff->dma; new_buff->page_offset = old_buff->page_offset; new_buff->pagecnt_bias = old_buff->pagecnt_bias; } static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer) { unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; struct page *page = rx_buffer->page; /* avoid re-using remote and pfmemalloc pages */ if (!dev_page_is_reusable(page)) return false; #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) return false; #else #define IXGBEVF_LAST_OFFSET \ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048) if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET) return false; #endif /* If we have drained the page fragment pool we need to update * the pagecnt_bias and page count so that we fully restock the * number of references the driver holds. */ if (unlikely(!pagecnt_bias)) { page_ref_add(page, USHRT_MAX); rx_buffer->pagecnt_bias = USHRT_MAX; } return true; } /** * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add * @skb: sk_buff to place the data into * @size: size of buffer to be added * * This function will add the data contained in rx_buffer->page to the skb. **/ static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *rx_buffer, struct sk_buff *skb, unsigned int size) { #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = ring_uses_build_skb(rx_ring) ? 
SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) : SKB_DATA_ALIGN(size); #endif skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, size, truesize); #if (PAGE_SIZE < 8192) rx_buffer->page_offset ^= truesize; #else rx_buffer->page_offset += truesize; #endif } static struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *rx_buffer, struct xdp_buff *xdp, union ixgbe_adv_rx_desc *rx_desc) { unsigned int size = xdp->data_end - xdp->data; #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start); #endif unsigned int headlen; struct sk_buff *skb; /* prefetch first cache line of first page */ net_prefetch(xdp->data); /* Note, we get here by enabling legacy-rx via: * * ethtool --set-priv-flags <dev> legacy-rx on * * In this mode, we currently get 0 extra XDP headroom as * opposed to having legacy-rx off, where we process XDP * packets going to stack via ixgbevf_build_skb(). * * For ixgbevf_construct_skb() mode it means that the * xdp->data_meta will always point to xdp->data, since * the helper cannot expand the head. Should this ever * changed in future for legacy-rx mode on, then lets also * add xdp->data_meta handling here. */ /* allocate a skb to store the frags */ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE); if (unlikely(!skb)) return NULL; /* Determine available headroom for copy */ headlen = size; if (headlen > IXGBEVF_RX_HDR_SIZE) headlen = eth_get_headlen(skb->dev, xdp->data, IXGBEVF_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long))); /* update all of the pointers */ size -= headlen; if (size) { skb_add_rx_frag(skb, 0, rx_buffer->page, (xdp->data + headlen) - page_address(rx_buffer->page), size, truesize); #if (PAGE_SIZE < 8192) rx_buffer->page_offset ^= truesize; #else rx_buffer->page_offset += truesize; #endif } else { rx_buffer->pagecnt_bias++; } return skb; } static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, u32 qmask) { struct ixgbe_hw *hw = &adapter->hw; IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); } static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *rx_buffer, struct xdp_buff *xdp, union ixgbe_adv_rx_desc *rx_desc) { unsigned int metasize = xdp->data - xdp->data_meta; #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start); #endif struct sk_buff *skb; /* Prefetch first cache line of first page. If xdp->data_meta * is unused, this points to xdp->data, otherwise, we likely * have a consumer accessing first few bytes of meta data, * and then actual data. 
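	 *
	 * Roughly, the page slice handed to napi_build_skb() below is laid
	 * out as:
	 *
	 *   [ headroom (IXGBEVF_SKB_PAD) | received frame | skb_shared_info ]
	 *
	 * which is why truesize above has to cover the padding and the
	 * shared info area in addition to the frame data itself.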
*/ net_prefetch(xdp->data_meta); /* build an skb around the page buffer */ skb = napi_build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; /* update pointers within the skb to store the data */ skb_reserve(skb, xdp->data - xdp->data_hard_start); __skb_put(skb, xdp->data_end - xdp->data); if (metasize) skb_metadata_set(skb, metasize); /* update buffer offset */ #if (PAGE_SIZE < 8192) rx_buffer->page_offset ^= truesize; #else rx_buffer->page_offset += truesize; #endif return skb; } #define IXGBEVF_XDP_PASS 0 #define IXGBEVF_XDP_CONSUMED 1 #define IXGBEVF_XDP_TX 2 static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring, struct xdp_buff *xdp) { struct ixgbevf_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; u32 len, cmd_type; dma_addr_t dma; u16 i; len = xdp->data_end - xdp->data; if (unlikely(!ixgbevf_desc_unused(ring))) return IXGBEVF_XDP_CONSUMED; dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE); if (dma_mapping_error(ring->dev, dma)) return IXGBEVF_XDP_CONSUMED; /* record the location of the first descriptor for this packet */ i = ring->next_to_use; tx_buffer = &ring->tx_buffer_info[i]; dma_unmap_len_set(tx_buffer, len, len); dma_unmap_addr_set(tx_buffer, dma, dma); tx_buffer->data = xdp->data; tx_buffer->bytecount = len; tx_buffer->gso_segs = 1; tx_buffer->protocol = 0; /* Populate minimal context descriptor that will provide for the * fact that we are expected to process Ethernet frames. */ if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) { struct ixgbe_adv_tx_context_desc *context_desc; set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state); context_desc = IXGBEVF_TX_CTXTDESC(ring, 0); context_desc->vlan_macip_lens = cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT); context_desc->fceof_saidx = 0; context_desc->type_tucmd_mlhl = cpu_to_le32(IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT); context_desc->mss_l4len_idx = 0; i = 1; } /* put descriptor type bits */ cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DCMD_IFCS; cmd_type |= len | IXGBE_TXD_CMD; tx_desc = IXGBEVF_TX_DESC(ring, i); tx_desc->read.buffer_addr = cpu_to_le64(dma); tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); tx_desc->read.olinfo_status = cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) | IXGBE_ADVTXD_CC); /* Avoid any potential race with cleanup */ smp_wmb(); /* set next_to_watch value indicating a packet is present */ i++; if (i == ring->count) i = 0; tx_buffer->next_to_watch = tx_desc; ring->next_to_use = i; return IXGBEVF_XDP_TX; } static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *rx_ring, struct xdp_buff *xdp) { int result = IXGBEVF_XDP_PASS; struct ixgbevf_ring *xdp_ring; struct bpf_prog *xdp_prog; u32 act; xdp_prog = READ_ONCE(rx_ring->xdp_prog); if (!xdp_prog) goto xdp_out; act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: break; case XDP_TX: xdp_ring = adapter->xdp_ring[rx_ring->queue_index]; result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp); if (result == IXGBEVF_XDP_CONSUMED) goto out_failure; break; default: bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); fallthrough; /* handle aborts by dropping packet */ case XDP_DROP: result = IXGBEVF_XDP_CONSUMED; break; } xdp_out: return ERR_PTR(-result); } static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring, unsigned int size) { unsigned int truesize; #if (PAGE_SIZE < 8192) truesize = 
ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ #else truesize = ring_uses_build_skb(rx_ring) ? SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : SKB_DATA_ALIGN(size); #endif return truesize; } static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *rx_buffer, unsigned int size) { unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size); #if (PAGE_SIZE < 8192) rx_buffer->page_offset ^= truesize; #else rx_buffer->page_offset += truesize; #endif } static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0; struct ixgbevf_adapter *adapter = q_vector->adapter; u16 cleaned_count = ixgbevf_desc_unused(rx_ring); struct sk_buff *skb = rx_ring->skb; bool xdp_xmit = false; struct xdp_buff xdp; /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ #if (PAGE_SIZE < 8192) frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0); #endif xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); while (likely(total_rx_packets < budget)) { struct ixgbevf_rx_buffer *rx_buffer; union ixgbe_adv_rx_desc *rx_desc; unsigned int size; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); cleaned_count = 0; } rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); size = le16_to_cpu(rx_desc->wb.upper.length); if (!size) break; /* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc until we know the * RXD_STAT_DD bit is set */ rmb(); rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size); /* retrieve a buffer from the ring */ if (!skb) { unsigned int offset = ixgbevf_rx_offset(rx_ring); unsigned char *hard_start; hard_start = page_address(rx_buffer->page) + rx_buffer->page_offset - offset; xdp_prepare_buff(&xdp, hard_start, offset, size, true); #if (PAGE_SIZE > 4096) /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size); #endif skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp); } if (IS_ERR(skb)) { if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) { xdp_xmit = true; ixgbevf_rx_buffer_flip(rx_ring, rx_buffer, size); } else { rx_buffer->pagecnt_bias++; } total_rx_packets++; total_rx_bytes += size; } else if (skb) { ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size); } else if (ring_uses_build_skb(rx_ring)) { skb = ixgbevf_build_skb(rx_ring, rx_buffer, &xdp, rx_desc); } else { skb = ixgbevf_construct_skb(rx_ring, rx_buffer, &xdp, rx_desc); } /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_rx_buff_failed++; rx_buffer->pagecnt_bias++; break; } ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb); cleaned_count++; /* fetch next buffer in frame if non-eop */ if (ixgbevf_is_non_eop(rx_ring, rx_desc)) continue; /* verify the packet layout is correct */ if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) { skb = NULL; continue; } /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; /* Workaround hardware that can't do proper VEPA multicast * source pruning. 
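		 * The embedded switch can reflect our own broadcast and
		 * multicast transmissions back to this VF, so any such frame
		 * whose Ethernet source address matches our own dev_addr is
		 * dropped here instead of being handed to the stack.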
		 */
		if ((skb->pkt_type == PACKET_BROADCAST ||
		     skb->pkt_type == PACKET_MULTICAST) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		/* populate checksum, VLAN, and protocol */
		ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

		ixgbevf_rx_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	if (xdp_xmit) {
		struct ixgbevf_ring *xdp_ring =
			adapter->xdp_ring[rx_ring->queue_index];

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
		wmb();
		ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget, work_done = 0;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx) {
		if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
			clean_complete = false;
	}

	if (budget <= 0)
		return budget;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
						   per_ring_budget);
		work_done += cleaned;
		if (cleaned >= per_ring_budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done))) {
		if (adapter->rx_itr_setting == 1)
			ixgbevf_set_itr(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
		    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
			ixgbevf_irq_enable_queues(adapter,
						  BIT(q_vector->v_idx));
	}

	return min(work_done, budget - 1);
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/* set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
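 *
 * As an illustration, with two Rx/Tx ring pairs spread over two queue
 * vectors, the loop below points the IVAR entries for Rx0/Tx0 at vector 0
 * and Rx1/Tx1 at vector 1, while the final ixgbevf_set_ivar(adapter, -1, 1,
 * v_idx) call steers the mailbox/other cause to the last, non-queue vector.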
**/ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) { struct ixgbevf_q_vector *q_vector; int q_vectors, v_idx; q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; adapter->eims_enable_mask = 0; /* Populate the IVAR table and set the ITR values to the * corresponding register. */ for (v_idx = 0; v_idx < q_vectors; v_idx++) { struct ixgbevf_ring *ring; q_vector = adapter->q_vector[v_idx]; ixgbevf_for_each_ring(ring, q_vector->rx) ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); ixgbevf_for_each_ring(ring, q_vector->tx) ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); if (q_vector->tx.ring && !q_vector->rx.ring) { /* Tx only vector */ if (adapter->tx_itr_setting == 1) q_vector->itr = IXGBE_12K_ITR; else q_vector->itr = adapter->tx_itr_setting; } else { /* Rx or Rx/Tx vector */ if (adapter->rx_itr_setting == 1) q_vector->itr = IXGBE_20K_ITR; else q_vector->itr = adapter->rx_itr_setting; } /* add q_vector eims value to global eims_enable_mask */ adapter->eims_enable_mask |= BIT(v_idx); ixgbevf_write_eitr(q_vector); } ixgbevf_set_ivar(adapter, -1, 1, v_idx); /* setup eims_other and add value to global eims_enable_mask */ adapter->eims_other = BIT(v_idx); adapter->eims_enable_mask |= adapter->eims_other; } enum latency_range { lowest_latency = 0, low_latency = 1, bulk_latency = 2, latency_invalid = 255 }; /** * ixgbevf_update_itr - update the dynamic ITR value based on statistics * @q_vector: structure containing interrupt and ring information * @ring_container: structure containing ring performance data * * Stores a new ITR value based on packets and byte * counts during the last interrupt. The advantage of per interrupt * computation is faster updates and more accurate ITR for the current * traffic pattern. Constants in this function were computed * based on theoretical maximum wire speed and thresholds were set based * on testing data as well as attempting to minimize response time * while increasing bulk throughput. **/ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring_container *ring_container) { int bytes = ring_container->total_bytes; int packets = ring_container->total_packets; u32 timepassed_us; u64 bytes_perint; u8 itr_setting = ring_container->itr; if (packets == 0) return; /* simple throttle rate management * 0-20MB/s lowest (100000 ints/s) * 20-100MB/s low (20000 ints/s) * 100-1249MB/s bulk (12000 ints/s) */ /* what was last interrupt timeslice? 
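	 * (e.g. at the 20K ints/s setting the interval works out to roughly
	 * 50 usec, so 3000 bytes received in one interval gives
	 * bytes_perint = 60, enough to step a ring in low_latency up to
	 * bulk_latency below)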
*/ timepassed_us = q_vector->itr >> 2; if (timepassed_us == 0) return; bytes_perint = bytes / timepassed_us; /* bytes/usec */ switch (itr_setting) { case lowest_latency: if (bytes_perint > 10) itr_setting = low_latency; break; case low_latency: if (bytes_perint > 20) itr_setting = bulk_latency; else if (bytes_perint <= 10) itr_setting = lowest_latency; break; case bulk_latency: if (bytes_perint <= 20) itr_setting = low_latency; break; } /* clear work counters since we have the values we need */ ring_container->total_bytes = 0; ring_container->total_packets = 0; /* write updated itr to ring container */ ring_container->itr = itr_setting; } static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) { u32 new_itr = q_vector->itr; u8 current_itr; ixgbevf_update_itr(q_vector, &q_vector->tx); ixgbevf_update_itr(q_vector, &q_vector->rx); current_itr = max(q_vector->rx.itr, q_vector->tx.itr); switch (current_itr) { /* counts and packets in update_itr are dependent on these numbers */ case lowest_latency: new_itr = IXGBE_100K_ITR; break; case low_latency: new_itr = IXGBE_20K_ITR; break; case bulk_latency: new_itr = IXGBE_12K_ITR; break; default: break; } if (new_itr != q_vector->itr) { /* do an exponential smoothing */ new_itr = (10 * new_itr * q_vector->itr) / ((9 * new_itr) + q_vector->itr); /* save the algorithm value here */ q_vector->itr = new_itr; ixgbevf_write_eitr(q_vector); } } static irqreturn_t ixgbevf_msix_other(int irq, void *data) { struct ixgbevf_adapter *adapter = data; struct ixgbe_hw *hw = &adapter->hw; hw->mac.get_link_status = 1; ixgbevf_service_event_schedule(adapter); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); return IRQ_HANDLED; } /** * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues) * @irq: unused * @data: pointer to our q_vector struct for this interrupt vector **/ static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) { struct ixgbevf_q_vector *q_vector = data; /* EIAM disabled interrupts (on this vector) for us */ if (q_vector->rx.ring || q_vector->tx.ring) napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } /** * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts * @adapter: board private structure * * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests * interrupts from the kernel. 
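 *
 * With the default paired layout this results in per-vector IRQ names such
 * as "<netdev>-TxRx-0", "<netdev>-TxRx-1", ..., plus one extra vector named
 * after the netdev itself for the mailbox/link ("other") interrupt.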
**/ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; unsigned int ri = 0, ti = 0; int vector, err; for (vector = 0; vector < q_vectors; vector++) { struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; struct msix_entry *entry = &adapter->msix_entries[vector]; if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), "%s-TxRx-%u", netdev->name, ri++); ti++; } else if (q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), "%s-rx-%u", netdev->name, ri++); } else if (q_vector->tx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), "%s-tx-%u", netdev->name, ti++); } else { /* skip this unused q_vector */ continue; } err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, q_vector->name, q_vector); if (err) { hw_dbg(&adapter->hw, "request_irq failed for MSIX interrupt Error: %d\n", err); goto free_queue_irqs; } } err = request_irq(adapter->msix_entries[vector].vector, &ixgbevf_msix_other, 0, netdev->name, adapter); if (err) { hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n", err); goto free_queue_irqs; } return 0; free_queue_irqs: while (vector) { vector--; free_irq(adapter->msix_entries[vector].vector, adapter->q_vector[vector]); } /* This failure is non-recoverable - it indicates the system is * out of MSIX vector resources and the VF driver cannot run * without them. Set the number of msix vectors to zero * indicating that not enough can be allocated. The error * will be returned to the user indicating device open failed. * Any further attempts to force the driver to open will also * fail. The only way to recover is to unload the driver and * reload it again. If the system has recovered some MSIX * vectors then it may succeed. */ adapter->num_msix_vectors = 0; return err; } /** * ixgbevf_request_irq - initialize interrupts * @adapter: board private structure * * Attempts to configure interrupts using the best available * capabilities of the hardware and kernel. 
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));

	/* disable head writeback */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* In order to avoid issues, WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on-chip descriptors, which is
	 * currently 40.
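	 *
	 * In the VFTXDCTL layout used below, PTHRESH occupies bits 6:0,
	 * HTHRESH bits 14:8 and WTHRESH bits 22:16, so (8 << 16) requests a
	 * write-back threshold of 8 descriptors while (1 << 8) | 32 sets
	 * HTHRESH = 1 and PTHRESH = 32.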
*/ txdctl |= (8 << 16); /* WTHRESH = 8 */ /* Setting PTHRESH to 32 both improves performance */ txdctl |= (1u << 8) | /* HTHRESH = 1 */ 32; /* PTHRESH = 32 */ /* reinitialize tx_buffer_info */ memset(ring->tx_buffer_info, 0, sizeof(struct ixgbevf_tx_buffer) * ring->count); clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state); clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state); IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl); /* poll to verify queue is enabled */ do { usleep_range(1000, 2000); txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx)); } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx); } /** * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset * @adapter: board private structure * * Configure the Tx unit of the MAC after a reset. **/ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) { u32 i; /* Setup the HW Tx Head and Tail descriptor pointers */ for (i = 0; i < adapter->num_tx_queues; i++) ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]); for (i = 0; i < adapter->num_xdp_queues; i++) ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]); } #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *ring, int index) { struct ixgbe_hw *hw = &adapter->hw; u32 srrctl; srrctl = IXGBE_SRRCTL_DROP_EN; srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; if (ring_uses_large_buffer(ring)) srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; else srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); } static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; /* PSRTYPE must be initialized in 82599 */ u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR | IXGBE_PSRTYPE_L2HDR; if (adapter->num_rx_queues > 1) psrtype |= BIT(29); IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); } #define IXGBEVF_MAX_RX_DESC_POLL 10 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; u32 rxdctl; u8 reg_idx = ring->reg_idx; if (IXGBE_REMOVED(hw->hw_addr)) return; rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); rxdctl &= ~IXGBE_RXDCTL_ENABLE; /* write value back with RXDCTL.ENABLE bit cleared */ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); /* the hardware may take up to 100us to really disable the Rx queue */ do { udelay(10); rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); if (!wait_loop) pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n", reg_idx); } static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; u32 rxdctl; u8 reg_idx = ring->reg_idx; if (IXGBE_REMOVED(hw->hw_addr)) return; do { usleep_range(1000, 2000); rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); if (!wait_loop) pr_err("RXDCTL.ENABLE queue %d not set while polling\n", reg_idx); } /** * ixgbevf_init_rss_key - Initialize adapter RSS key * @adapter: device handle * * Allocates and initializes the RSS key if it is not allocated. 
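 *
 * The key is simply IXGBEVF_RSS_HASH_KEY_SIZE bytes of random data from
 * netdev_rss_key_fill(); ixgbevf_setup_vfmrqc() later programs it into the
 * VFRSSRK registers 32 bits at a time as the RSS (Toeplitz) hash seed.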
**/ static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter) { u32 *rss_key; if (!adapter->rss_key) { rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL); if (unlikely(!rss_key)) return -ENOMEM; netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE); adapter->rss_key = rss_key; } return 0; } static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 vfmrqc = 0, vfreta = 0; u16 rss_i = adapter->num_rx_queues; u8 i, j; /* Fill out hash function seeds */ for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++) IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i)); for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) { if (j == rss_i) j = 0; adapter->rss_indir_tbl[i] = j; vfreta |= j << (i & 0x3) * 8; if ((i & 3) == 3) { IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta); vfreta = 0; } } /* Perform hash on these packet types */ vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 | IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP | IXGBE_VFMRQC_RSS_FIELD_IPV6 | IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP; vfmrqc |= IXGBE_VFMRQC_RSSEN; IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc); } static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; union ixgbe_adv_rx_desc *rx_desc; u64 rdba = ring->dma; u32 rxdctl; u8 reg_idx = ring->reg_idx; /* disable queue to avoid issues while updating state */ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); ixgbevf_disable_rx_queue(adapter, ring); IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32)); IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32); IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx), ring->count * sizeof(union ixgbe_adv_rx_desc)); #ifndef CONFIG_SPARC /* enable relaxed ordering */ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx), IXGBE_DCA_RXCTRL_DESC_RRO_EN); #else IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx), IXGBE_DCA_RXCTRL_DESC_RRO_EN | IXGBE_DCA_RXCTRL_DATA_WRO_EN); #endif /* reset head and tail pointers */ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0); ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx); /* initialize rx_buffer_info */ memset(ring->rx_buffer_info, 0, sizeof(struct ixgbevf_rx_buffer) * ring->count); /* initialize Rx descriptor 0 */ rx_desc = IXGBEVF_RX_DESC(ring, 0); rx_desc->wb.upper.length = 0; /* reset ntu and ntc to place SW in sync with hardwdare */ ring->next_to_clean = 0; ring->next_to_use = 0; ring->next_to_alloc = 0; ixgbevf_configure_srrctl(adapter, ring, reg_idx); /* RXDCTL.RLPML does not work on 82599 */ if (adapter->hw.mac.type != ixgbe_mac_82599_vf) { rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | IXGBE_RXDCTL_RLPML_EN); #if (PAGE_SIZE < 8192) /* Limit the maximum frame size so we don't overrun the skb */ if (ring_uses_build_skb(ring) && !ring_uses_large_buffer(ring)) rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB | IXGBE_RXDCTL_RLPML_EN; #endif } rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); ixgbevf_rx_desc_queue_enable(adapter, ring); ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring)); } static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *rx_ring) { struct net_device *netdev = adapter->netdev; unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; /* set build_skb and buffer size flags */ clear_ring_build_skb_enabled(rx_ring); clear_ring_uses_large_buffer(rx_ring); if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX) return; if (PAGE_SIZE < 8192) if 
(max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB) set_ring_uses_large_buffer(rx_ring); /* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */ if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring)) return; set_ring_build_skb_enabled(rx_ring); } /** * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset * @adapter: board private structure * * Configure the Rx unit of the MAC after a reset. **/ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; int i, ret; ixgbevf_setup_psrtype(adapter); if (hw->mac.type >= ixgbe_mac_X550_vf) ixgbevf_setup_vfmrqc(adapter); spin_lock_bh(&adapter->mbx_lock); /* notify the PF of our intent to use this size of frame */ ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); spin_unlock_bh(&adapter->mbx_lock); if (ret) dev_err(&adapter->pdev->dev, "Failed to set MTU at %d\n", netdev->mtu); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; ixgbevf_set_rx_buffer_len(adapter, rx_ring); ixgbevf_configure_rx_ring(adapter, rx_ring); } } static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; int err; spin_lock_bh(&adapter->mbx_lock); /* add VID to filter table */ err = hw->mac.ops.set_vfta(hw, vid, 0, true); spin_unlock_bh(&adapter->mbx_lock); if (err) { netdev_err(netdev, "VF could not set VLAN %d\n", vid); /* translate error return types so error makes sense */ if (err == IXGBE_ERR_MBX) return -EIO; if (err == IXGBE_ERR_INVALID_ARGUMENT) return -EACCES; } set_bit(vid, adapter->active_vlans); return err; } static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; int err; spin_lock_bh(&adapter->mbx_lock); /* remove VID from filter table */ err = hw->mac.ops.set_vfta(hw, vid, 0, false); spin_unlock_bh(&adapter->mbx_lock); if (err) netdev_err(netdev, "Could not remove VLAN %d\n", vid); clear_bit(vid, adapter->active_vlans); return err; } static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) { u16 vid; for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) ixgbevf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } static int ixgbevf_write_uc_addr_list(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; int count = 0; if (!netdev_uc_empty(netdev)) { struct netdev_hw_addr *ha; netdev_for_each_uc_addr(ha, netdev) { hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); udelay(200); } } else { /* If the list is empty then send message to PF driver to * clear all MAC VLANs on this VF. */ hw->mac.ops.set_uc_addr(hw, 0, NULL); } return count; } /** * ixgbevf_set_rx_mode - Multicast and unicast set * @netdev: network interface device structure * * The set_rx_method entry point is called whenever the multicast address * list, unicast address list or the network interface flags are updated. * This routine is responsible for configuring the hardware for proper * multicast mode and configuring requested unicast filters. 
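 *
 * The requested mode is the most permissive one implied by the netdev
 * flags (PROMISC > ALLMULTI > MULTI > NONE); note that the PF may refuse
 * the more permissive modes for VFs it does not trust.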
**/ static void ixgbevf_set_rx_mode(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; unsigned int flags = netdev->flags; int xcast_mode; /* request the most inclusive mode we need */ if (flags & IFF_PROMISC) xcast_mode = IXGBEVF_XCAST_MODE_PROMISC; else if (flags & IFF_ALLMULTI) xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI; else if (flags & (IFF_BROADCAST | IFF_MULTICAST)) xcast_mode = IXGBEVF_XCAST_MODE_MULTI; else xcast_mode = IXGBEVF_XCAST_MODE_NONE; spin_lock_bh(&adapter->mbx_lock); hw->mac.ops.update_xcast_mode(hw, xcast_mode); /* reprogram multicast list */ hw->mac.ops.update_mc_addr_list(hw, netdev); ixgbevf_write_uc_addr_list(netdev); spin_unlock_bh(&adapter->mbx_lock); } static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) { int q_idx; struct ixgbevf_q_vector *q_vector; int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = adapter->q_vector[q_idx]; napi_enable(&q_vector->napi); } } static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) { int q_idx; struct ixgbevf_q_vector *q_vector; int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = adapter->q_vector[q_idx]; napi_disable(&q_vector->napi); } } static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; unsigned int def_q = 0; unsigned int num_tcs = 0; unsigned int num_rx_queues = adapter->num_rx_queues; unsigned int num_tx_queues = adapter->num_tx_queues; int err; spin_lock_bh(&adapter->mbx_lock); /* fetch queue configuration from the PF */ err = ixgbevf_get_queues(hw, &num_tcs, &def_q); spin_unlock_bh(&adapter->mbx_lock); if (err) return err; if (num_tcs > 1) { /* we need only one Tx queue */ num_tx_queues = 1; /* update default Tx ring register index */ adapter->tx_ring[0]->reg_idx = def_q; /* we need as many queues as traffic classes */ num_rx_queues = num_tcs; } /* if we have a bad config abort request queue reset */ if ((adapter->num_rx_queues != num_rx_queues) || (adapter->num_tx_queues != num_tx_queues)) { /* force mailbox timeout to prevent further messages */ hw->mbx.timeout = 0; /* wait for watchdog to come around and bail us out */ set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state); } return 0; } static void ixgbevf_configure(struct ixgbevf_adapter *adapter) { ixgbevf_configure_dcb(adapter); ixgbevf_set_rx_mode(adapter->netdev); ixgbevf_restore_vlan(adapter); ixgbevf_ipsec_restore(adapter); ixgbevf_configure_tx(adapter); ixgbevf_configure_rx(adapter); } static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) { /* Only save pre-reset stats if there are some */ if (adapter->stats.vfgprc || adapter->stats.vfgptc) { adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - adapter->stats.base_vfgprc; adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - adapter->stats.base_vfgptc; adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - adapter->stats.base_vfgorc; adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - adapter->stats.base_vfgotc; adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - adapter->stats.base_vfmprc; } } static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); adapter->stats.last_vfgorc |= 
(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); adapter->stats.last_vfgotc |= (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; } static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; static const int api[] = { ixgbe_mbox_api_15, ixgbe_mbox_api_14, ixgbe_mbox_api_13, ixgbe_mbox_api_12, ixgbe_mbox_api_11, ixgbe_mbox_api_10, ixgbe_mbox_api_unknown }; int err, idx = 0; spin_lock_bh(&adapter->mbx_lock); while (api[idx] != ixgbe_mbox_api_unknown) { err = hw->mac.ops.negotiate_api_version(hw, api[idx]); if (!err) break; idx++; } if (hw->api_version >= ixgbe_mbox_api_15) { hw->mbx.ops.init_params(hw); memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, sizeof(struct ixgbe_mbx_operations)); } spin_unlock_bh(&adapter->mbx_lock); } static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct ixgbe_hw *hw = &adapter->hw; bool state; ixgbevf_configure_msix(adapter); spin_lock_bh(&adapter->mbx_lock); if (is_valid_ether_addr(hw->mac.addr)) hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); else hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); spin_unlock_bh(&adapter->mbx_lock); state = adapter->link_state; hw->mac.ops.get_link_state(hw, &adapter->link_state); if (state && state != adapter->link_state) dev_info(&pdev->dev, "VF is administratively disabled\n"); smp_mb__before_atomic(); clear_bit(__IXGBEVF_DOWN, &adapter->state); ixgbevf_napi_enable_all(adapter); /* clear any pending interrupts, may auto mask */ IXGBE_READ_REG(hw, IXGBE_VTEICR); ixgbevf_irq_enable(adapter); /* enable transmits */ netif_tx_start_all_queues(netdev); ixgbevf_save_reset_stats(adapter); ixgbevf_init_last_counter_stats(adapter); hw->mac.get_link_status = 1; mod_timer(&adapter->service_timer, jiffies); } void ixgbevf_up(struct ixgbevf_adapter *adapter) { ixgbevf_configure(adapter); ixgbevf_up_complete(adapter); } /** * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from **/ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) { u16 i = rx_ring->next_to_clean; /* Free Rx ring sk_buff */ if (rx_ring->skb) { dev_kfree_skb(rx_ring->skb); rx_ring->skb = NULL; } /* Free all the Rx ring pages */ while (i != rx_ring->next_to_alloc) { struct ixgbevf_rx_buffer *rx_buffer; rx_buffer = &rx_ring->rx_buffer_info[i]; /* Invalidate cache lines that may have been written to by * device so that we avoid corrupting memory. 
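		 * The sync is done separately from the unmap because the Rx
		 * pages are mapped with IXGBEVF_RX_DMA_ATTR (which typically
		 * includes DMA_ATTR_SKIP_CPU_SYNC), so dma_unmap_page_attrs()
		 * alone would not bring the CPU's view of the buffer up to
		 * date.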
*/ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, ixgbevf_rx_bufsz(rx_ring), DMA_FROM_DEVICE); /* free resources associated with mapping */ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, ixgbevf_rx_pg_size(rx_ring), DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR); __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); i++; if (i == rx_ring->count) i = 0; } rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; } /** * ixgbevf_clean_tx_ring - Free Tx Buffers * @tx_ring: ring to be cleaned **/ static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring) { u16 i = tx_ring->next_to_clean; struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; while (i != tx_ring->next_to_use) { union ixgbe_adv_tx_desc *eop_desc, *tx_desc; /* Free all the Tx ring sk_buffs */ if (ring_is_xdp(tx_ring)) page_frag_free(tx_buffer->data); else dev_kfree_skb_any(tx_buffer->skb); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); /* check for eop_desc to determine the end of the packet */ eop_desc = tx_buffer->next_to_watch; tx_desc = IXGBEVF_TX_DESC(tx_ring, i); /* unmap remaining buffers */ while (tx_desc != eop_desc) { tx_buffer++; tx_desc++; i++; if (unlikely(i == tx_ring->count)) { i = 0; tx_buffer = tx_ring->tx_buffer_info; tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); } /* unmap any remaining paged data */ if (dma_unmap_len(tx_buffer, len)) dma_unmap_page(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); } /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; i++; if (unlikely(i == tx_ring->count)) { i = 0; tx_buffer = tx_ring->tx_buffer_info; } } /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; } /** * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues * @adapter: board private structure **/ static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) { int i; for (i = 0; i < adapter->num_rx_queues; i++) ixgbevf_clean_rx_ring(adapter->rx_ring[i]); } /** * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues * @adapter: board private structure **/ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) ixgbevf_clean_tx_ring(adapter->tx_ring[i]); for (i = 0; i < adapter->num_xdp_queues; i++) ixgbevf_clean_tx_ring(adapter->xdp_ring[i]); } void ixgbevf_down(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; int i; /* signal that we are down to the interrupt handler */ if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) return; /* do nothing if already down */ /* disable all enabled Rx queues */ for (i = 0; i < adapter->num_rx_queues; i++) ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); usleep_range(10000, 20000); netif_tx_stop_all_queues(netdev); /* call carrier off first to avoid false dev_watchdog timeouts */ netif_carrier_off(netdev); netif_tx_disable(netdev); ixgbevf_irq_disable(adapter); ixgbevf_napi_disable_all(adapter); del_timer_sync(&adapter->service_timer); /* disable transmits in the hardware now that interrupts are off */ for (i = 0; i < adapter->num_tx_queues; i++) { u8 reg_idx = adapter->tx_ring[i]->reg_idx; IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); } for (i = 0; i < adapter->num_xdp_queues; i++) { u8 reg_idx = 
adapter->xdp_ring[i]->reg_idx; IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); } if (!pci_channel_offline(adapter->pdev)) ixgbevf_reset(adapter); ixgbevf_clean_all_tx_rings(adapter); ixgbevf_clean_all_rx_rings(adapter); } void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) { while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) msleep(1); ixgbevf_down(adapter); pci_set_master(adapter->pdev); ixgbevf_up(adapter); clear_bit(__IXGBEVF_RESETTING, &adapter->state); } void ixgbevf_reset(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; if (hw->mac.ops.reset_hw(hw)) { hw_dbg(hw, "PF still resetting\n"); } else { hw->mac.ops.init_hw(hw); ixgbevf_negotiate_api(adapter); } if (is_valid_ether_addr(adapter->hw.mac.addr)) { eth_hw_addr_set(netdev, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } adapter->last_reset = jiffies; } static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, int vectors) { int vector_threshold; /* We'll want at least 2 (vector_threshold): * 1) TxQ[0] + RxQ[0] handler * 2) Other (Link Status Change, etc.) */ vector_threshold = MIN_MSIX_COUNT; /* The more we get, the more we will assign to Tx/Rx Cleanup * for the separate queues...where Rx Cleanup >= Tx Cleanup. * Right now, we simply care about how many we'll get; we'll * set them up later while requesting irq's. */ vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, vector_threshold, vectors); if (vectors < 0) { dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n"); kfree(adapter->msix_entries); adapter->msix_entries = NULL; return vectors; } /* Adjust for only the vectors we'll use, which is minimum * of max_msix_q_vectors + NON_Q_VECTORS, or the number of * vectors we were allocated. */ adapter->num_msix_vectors = vectors; return 0; } /** * ixgbevf_set_num_queues - Allocate queues for device, feature dependent * @adapter: board private structure to initialize * * This is the top level queue allocation routine. The order here is very * important, starting with the "most" number of features turned on at once, * and ending with the smallest set of features. This way large combinations * can be allocated if they're turned on, and smaller combinations are the * fall through conditions. * **/ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; unsigned int def_q = 0; unsigned int num_tcs = 0; int err; /* Start with base case */ adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; adapter->num_xdp_queues = 0; spin_lock_bh(&adapter->mbx_lock); /* fetch queue configuration from the PF */ err = ixgbevf_get_queues(hw, &num_tcs, &def_q); spin_unlock_bh(&adapter->mbx_lock); if (err) return; /* we need as many queues as traffic classes */ if (num_tcs > 1) { adapter->num_rx_queues = num_tcs; } else { u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES); switch (hw->api_version) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: case ixgbe_mbox_api_15: if (adapter->xdp_prog && hw->mac.max_tx_queues == rss) rss = rss > 3 ? 2 : 1; adapter->num_rx_queues = rss; adapter->num_tx_queues = rss; adapter->num_xdp_queues = adapter->xdp_prog ? 
rss : 0; break; default: break; } } } /** * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported * @adapter: board private structure to initialize * * Attempt to configure the interrupts using the best available * capabilities of the hardware and the kernel. **/ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) { int vector, v_budget; /* It's easy to be greedy for MSI-X vectors, but it really * doesn't do us much good if we have a lot more vectors * than CPU's. So let's be conservative and only ask for * (roughly) the same number of vectors as there are CPU's. * The default is to use pairs of vectors. */ v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); v_budget = min_t(int, v_budget, num_online_cpus()); v_budget += NON_Q_VECTORS; adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); if (!adapter->msix_entries) return -ENOMEM; for (vector = 0; vector < v_budget; vector++) adapter->msix_entries[vector].entry = vector; /* A failure in MSI-X entry allocation isn't fatal, but the VF driver * does not support any other modes, so we will simply fail here. Note * that we clean up the msix_entries pointer else-where. */ return ixgbevf_acquire_msix_vectors(adapter, v_budget); } static void ixgbevf_add_ring(struct ixgbevf_ring *ring, struct ixgbevf_ring_container *head) { ring->next = head->ring; head->ring = ring; head->count++; } /** * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector * @adapter: board private structure to initialize * @v_idx: index of vector in adapter struct * @txr_count: number of Tx rings for q vector * @txr_idx: index of first Tx ring to assign * @xdp_count: total number of XDP rings to allocate * @xdp_idx: index of first XDP ring to allocate * @rxr_count: number of Rx rings for q vector * @rxr_idx: index of first Rx ring to assign * * We allocate one q_vector. If allocation fails we return -ENOMEM. 
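 *
 * For example, ixgbevf_alloc_q_vector(adapter, 0, 1, 0, 0, 0, 1, 0) would
 * set up vector 0 with one Tx ring (index 0), no XDP rings and one Rx ring
 * (index 0), all sharing that vector's NAPI context.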
**/ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx, int txr_count, int txr_idx, int xdp_count, int xdp_idx, int rxr_count, int rxr_idx) { struct ixgbevf_q_vector *q_vector; int reg_idx = txr_idx + xdp_idx; struct ixgbevf_ring *ring; int ring_count, size; ring_count = txr_count + xdp_count + rxr_count; size = sizeof(*q_vector) + (sizeof(*ring) * ring_count); /* allocate q_vector and rings */ q_vector = kzalloc(size, GFP_KERNEL); if (!q_vector) return -ENOMEM; /* initialize NAPI */ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll); /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; q_vector->adapter = adapter; q_vector->v_idx = v_idx; /* initialize pointer to rings */ ring = q_vector->ring; while (txr_count) { /* assign generic ring traits */ ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; /* configure backlink on ring */ ring->q_vector = q_vector; /* update q_vector Tx values */ ixgbevf_add_ring(ring, &q_vector->tx); /* apply Tx specific ring traits */ ring->count = adapter->tx_ring_count; ring->queue_index = txr_idx; ring->reg_idx = reg_idx; /* assign ring to adapter */ adapter->tx_ring[txr_idx] = ring; /* update count and index */ txr_count--; txr_idx++; reg_idx++; /* push pointer to next ring */ ring++; } while (xdp_count) { /* assign generic ring traits */ ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; /* configure backlink on ring */ ring->q_vector = q_vector; /* update q_vector Tx values */ ixgbevf_add_ring(ring, &q_vector->tx); /* apply Tx specific ring traits */ ring->count = adapter->tx_ring_count; ring->queue_index = xdp_idx; ring->reg_idx = reg_idx; set_ring_xdp(ring); /* assign ring to adapter */ adapter->xdp_ring[xdp_idx] = ring; /* update count and index */ xdp_count--; xdp_idx++; reg_idx++; /* push pointer to next ring */ ring++; } while (rxr_count) { /* assign generic ring traits */ ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; /* configure backlink on ring */ ring->q_vector = q_vector; /* update q_vector Rx values */ ixgbevf_add_ring(ring, &q_vector->rx); /* apply Rx specific ring traits */ ring->count = adapter->rx_ring_count; ring->queue_index = rxr_idx; ring->reg_idx = rxr_idx; /* assign ring to adapter */ adapter->rx_ring[rxr_idx] = ring; /* update count and index */ rxr_count--; rxr_idx++; /* push pointer to next ring */ ring++; } return 0; } /** * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector * @adapter: board private structure to initialize * @v_idx: index of vector in adapter struct * * This function frees the memory allocated to the q_vector. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector. **/ static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx) { struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx]; struct ixgbevf_ring *ring; ixgbevf_for_each_ring(ring, q_vector->tx) { if (ring_is_xdp(ring)) adapter->xdp_ring[ring->queue_index] = NULL; else adapter->tx_ring[ring->queue_index] = NULL; } ixgbevf_for_each_ring(ring, q_vector->rx) adapter->rx_ring[ring->queue_index] = NULL; adapter->q_vector[v_idx] = NULL; netif_napi_del(&q_vector->napi); /* ixgbevf_get_stats() might access the rings on this vector, * we must wait a grace period before freeing it. 
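	 * Readers such as ixgbevf_get_stats() walk the ring pointers under
	 * rcu_read_lock(), so the actual free of the q_vector (and the rings
	 * embedded in it) is deferred with kfree_rcu() until after a grace
	 * period.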
*/ kfree_rcu(q_vector, rcu); } /** * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors * @adapter: board private structure to initialize * * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM. **/ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) { int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; int rxr_remaining = adapter->num_rx_queues; int txr_remaining = adapter->num_tx_queues; int xdp_remaining = adapter->num_xdp_queues; int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0; int err; if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) { for (; rxr_remaining; v_idx++, q_vectors--) { int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); err = ixgbevf_alloc_q_vector(adapter, v_idx, 0, 0, 0, 0, rqpv, rxr_idx); if (err) goto err_out; /* update counts and index */ rxr_remaining -= rqpv; rxr_idx += rqpv; } } for (; q_vectors; v_idx++, q_vectors--) { int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors); int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors); err = ixgbevf_alloc_q_vector(adapter, v_idx, tqpv, txr_idx, xqpv, xdp_idx, rqpv, rxr_idx); if (err) goto err_out; /* update counts and index */ rxr_remaining -= rqpv; rxr_idx += rqpv; txr_remaining -= tqpv; txr_idx += tqpv; xdp_remaining -= xqpv; xdp_idx += xqpv; } return 0; err_out: while (v_idx) { v_idx--; ixgbevf_free_q_vector(adapter, v_idx); } return -ENOMEM; } /** * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors * @adapter: board private structure to initialize * * This function frees the memory allocated to the q_vectors. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector. **/ static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) { int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; while (q_vectors) { q_vectors--; ixgbevf_free_q_vector(adapter, q_vectors); } } /** * ixgbevf_reset_interrupt_capability - Reset MSIX setup * @adapter: board private structure * **/ static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) { if (!adapter->msix_entries) return; pci_disable_msix(adapter->pdev); kfree(adapter->msix_entries); adapter->msix_entries = NULL; } /** * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init * @adapter: board private structure to initialize * **/ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) { int err; /* Number of supported queues */ ixgbevf_set_num_queues(adapter); err = ixgbevf_set_interrupt_capability(adapter); if (err) { hw_dbg(&adapter->hw, "Unable to setup interrupt capabilities\n"); goto err_set_interrupt; } err = ixgbevf_alloc_q_vectors(adapter); if (err) { hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n"); goto err_alloc_q_vectors; } hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n", (adapter->num_rx_queues > 1) ? 
"Enabled" : "Disabled", adapter->num_rx_queues, adapter->num_tx_queues, adapter->num_xdp_queues); set_bit(__IXGBEVF_DOWN, &adapter->state); return 0; err_alloc_q_vectors: ixgbevf_reset_interrupt_capability(adapter); err_set_interrupt: return err; } /** * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings * @adapter: board private structure to clear interrupt scheme on * * We go through and clear interrupt specific resources and reset the structure * to pre-load conditions **/ static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) { adapter->num_tx_queues = 0; adapter->num_xdp_queues = 0; adapter->num_rx_queues = 0; ixgbevf_free_q_vectors(adapter); ixgbevf_reset_interrupt_capability(adapter); } /** * ixgbevf_sw_init - Initialize general software structures * @adapter: board private structure to initialize * * ixgbevf_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). **/ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; struct net_device *netdev = adapter->netdev; int err; /* PCI config space info */ hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; hw->revision_id = pdev->revision; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; hw->mbx.ops.init_params(hw); if (hw->mac.type >= ixgbe_mac_X550_vf) { err = ixgbevf_init_rss_key(adapter); if (err) goto out; } /* assume legacy case in which PF would only give VF 2 queues */ hw->mac.max_tx_queues = 2; hw->mac.max_rx_queues = 2; /* lock to protect mailbox accesses */ spin_lock_init(&adapter->mbx_lock); err = hw->mac.ops.reset_hw(hw); if (err) { dev_info(&pdev->dev, "PF still in reset state. 
Is the PF interface up?\n"); } else { err = hw->mac.ops.init_hw(hw); if (err) { pr_err("init_shared_code failed: %d\n", err); goto out; } ixgbevf_negotiate_api(adapter); err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); if (err) dev_info(&pdev->dev, "Error reading MAC address\n"); else if (is_zero_ether_addr(adapter->hw.mac.addr)) dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); eth_hw_addr_set(netdev, hw->mac.addr); } if (!is_valid_ether_addr(netdev->dev_addr)) { dev_info(&pdev->dev, "Assigning random MAC address\n"); eth_hw_addr_random(netdev); ether_addr_copy(hw->mac.addr, netdev->dev_addr); ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr); } /* Enable dynamic interrupt throttling rates */ adapter->rx_itr_setting = 1; adapter->tx_itr_setting = 1; /* set default ring sizes */ adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; adapter->link_state = true; set_bit(__IXGBEVF_DOWN, &adapter->state); return 0; out: return err; } #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ { \ u32 current_counter = IXGBE_READ_REG(hw, reg); \ if (current_counter < last_counter) \ counter += 0x100000000LL; \ last_counter = current_counter; \ counter &= 0xFFFFFFFF00000000LL; \ counter |= current_counter; \ } #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ { \ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ u64 current_counter = (current_counter_msb << 32) | \ current_counter_lsb; \ if (current_counter < last_counter) \ counter += 0x1000000000LL; \ last_counter = current_counter; \ counter &= 0xFFFFFFF000000000LL; \ counter |= current_counter; \ } /** * ixgbevf_update_stats - Update the board statistics counters. 
* @adapter: board private structure **/ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; u64 alloc_rx_page = 0, hw_csum_rx_error = 0; int i; if (test_bit(__IXGBEVF_DOWN, &adapter->state) || test_bit(__IXGBEVF_RESETTING, &adapter->state)) return; UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, adapter->stats.vfgprc); UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, adapter->stats.vfgptc); UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, adapter->stats.last_vfgorc, adapter->stats.vfgorc); UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, adapter->stats.last_vfgotc, adapter->stats.vfgotc); UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, adapter->stats.vfmprc); for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; hw_csum_rx_error += rx_ring->rx_stats.csum_err; alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; } adapter->hw_csum_rx_error = hw_csum_rx_error; adapter->alloc_rx_page_failed = alloc_rx_page_failed; adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; adapter->alloc_rx_page = alloc_rx_page; } /** * ixgbevf_service_timer - Timer Call-back * @t: pointer to timer_list struct **/ static void ixgbevf_service_timer(struct timer_list *t) { struct ixgbevf_adapter *adapter = from_timer(adapter, t, service_timer); /* Reset the timer */ mod_timer(&adapter->service_timer, (HZ * 2) + jiffies); ixgbevf_service_event_schedule(adapter); } static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) { if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state)) return; rtnl_lock(); /* If we're already down or resetting, just bail */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || test_bit(__IXGBEVF_REMOVING, &adapter->state) || test_bit(__IXGBEVF_RESETTING, &adapter->state)) { rtnl_unlock(); return; } adapter->tx_timeout_count++; ixgbevf_reinit_locked(adapter); rtnl_unlock(); } /** * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts * @adapter: pointer to the device adapter structure * * This function serves two purposes. First it strobes the interrupt lines * in order to make certain interrupts are occurring. Secondly it sets the * bits needed to check for TX hangs. As a result we should immediately * determine if a hang has occurred. 
**/ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 eics = 0; int i; /* If we're down or resetting, just bail */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || test_bit(__IXGBEVF_RESETTING, &adapter->state)) return; /* Force detection of hung controller */ if (netif_carrier_ok(adapter->netdev)) { for (i = 0; i < adapter->num_tx_queues; i++) set_check_for_tx_hang(adapter->tx_ring[i]); for (i = 0; i < adapter->num_xdp_queues; i++) set_check_for_tx_hang(adapter->xdp_ring[i]); } /* get one bit for every active Tx/Rx interrupt vector */ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { struct ixgbevf_q_vector *qv = adapter->q_vector[i]; if (qv->rx.ring || qv->tx.ring) eics |= BIT(i); } /* Cause software interrupt to ensure rings are cleaned */ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); } /** * ixgbevf_watchdog_update_link - update the link status * @adapter: pointer to the device adapter structure **/ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 link_speed = adapter->link_speed; bool link_up = adapter->link_up; s32 err; spin_lock_bh(&adapter->mbx_lock); err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); spin_unlock_bh(&adapter->mbx_lock); /* if check for link returns error we will need to reset */ if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) { set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state); link_up = false; } adapter->link_up = link_up; adapter->link_speed = link_speed; } /** * ixgbevf_watchdog_link_is_up - update netif_carrier status and * print link up message * @adapter: pointer to the device adapter structure **/ static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; /* only continue if link was previously down */ if (netif_carrier_ok(netdev)) return; dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n", (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? "10 Gbps" : (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ? "1 Gbps" : (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ? 
"100 Mbps" : "unknown speed"); netif_carrier_on(netdev); } /** * ixgbevf_watchdog_link_is_down - update netif_carrier status and * print link down message * @adapter: pointer to the adapter structure **/ static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; adapter->link_speed = 0; /* only continue if link was up previously */ if (!netif_carrier_ok(netdev)) return; dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); netif_carrier_off(netdev); } /** * ixgbevf_watchdog_subtask - worker thread to bring link up * @adapter: board private structure **/ static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter) { /* if interface is down do nothing */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || test_bit(__IXGBEVF_RESETTING, &adapter->state)) return; ixgbevf_watchdog_update_link(adapter); if (adapter->link_up && adapter->link_state) ixgbevf_watchdog_link_is_up(adapter); else ixgbevf_watchdog_link_is_down(adapter); ixgbevf_update_stats(adapter); } /** * ixgbevf_service_task - manages and runs subtasks * @work: pointer to work_struct containing our data **/ static void ixgbevf_service_task(struct work_struct *work) { struct ixgbevf_adapter *adapter = container_of(work, struct ixgbevf_adapter, service_task); struct ixgbe_hw *hw = &adapter->hw; if (IXGBE_REMOVED(hw->hw_addr)) { if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { rtnl_lock(); ixgbevf_down(adapter); rtnl_unlock(); } return; } ixgbevf_queue_reset_subtask(adapter); ixgbevf_reset_subtask(adapter); ixgbevf_watchdog_subtask(adapter); ixgbevf_check_hang_subtask(adapter); ixgbevf_service_event_complete(adapter); } /** * ixgbevf_free_tx_resources - Free Tx Resources per Queue * @tx_ring: Tx descriptor ring for a specific queue * * Free all transmit software resources **/ void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring) { ixgbevf_clean_tx_ring(tx_ring); vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; /* if not set, then don't free */ if (!tx_ring->desc) return; dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; } /** * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues * @adapter: board private structure * * Free all transmit software resources **/ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) if (adapter->tx_ring[i]->desc) ixgbevf_free_tx_resources(adapter->tx_ring[i]); for (i = 0; i < adapter->num_xdp_queues; i++) if (adapter->xdp_ring[i]->desc) ixgbevf_free_tx_resources(adapter->xdp_ring[i]); } /** * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) * @tx_ring: Tx descriptor ring (for a specific queue) to setup * * Return 0 on success, negative on failure **/ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) { struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); int size; size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; tx_ring->tx_buffer_info = vmalloc(size); if (!tx_ring->tx_buffer_info) goto err; u64_stats_init(&tx_ring->syncp); /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) goto err; return 0; err: vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor 
ring\n"); return -ENOMEM; } /** * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources * @adapter: board private structure * * If this function returns with an error, then it's possible one or * more of the rings is populated (while the rest are not). It is the * callers duty to clean those orphaned rings. * * Return 0 on success, negative on failure **/ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) { int i, j = 0, err = 0; for (i = 0; i < adapter->num_tx_queues; i++) { err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); if (!err) continue; hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i); goto err_setup_tx; } for (j = 0; j < adapter->num_xdp_queues; j++) { err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]); if (!err) continue; hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j); goto err_setup_tx; } return 0; err_setup_tx: /* rewind the index freeing the rings as we go */ while (j--) ixgbevf_free_tx_resources(adapter->xdp_ring[j]); while (i--) ixgbevf_free_tx_resources(adapter->tx_ring[i]); return err; } /** * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) * @adapter: board private structure * @rx_ring: Rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *rx_ring) { int size; size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; rx_ring->rx_buffer_info = vmalloc(size); if (!rx_ring->rx_buffer_info) goto err; u64_stats_init(&rx_ring->syncp); /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) goto err; /* XDP RX-queue info */ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, rx_ring->queue_index, 0) < 0) goto err; rx_ring->xdp_prog = adapter->xdp_prog; return 0; err: vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n"); return -ENOMEM; } /** * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources * @adapter: board private structure * * If this function returns with an error, then it's possible one or * more of the rings is populated (while the rest are not). It is the * callers duty to clean those orphaned rings. 
* * Return 0 on success, negative on failure **/ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) { int i, err = 0; for (i = 0; i < adapter->num_rx_queues; i++) { err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]); if (!err) continue; hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i); goto err_setup_rx; } return 0; err_setup_rx: /* rewind the index freeing the rings as we go */ while (i--) ixgbevf_free_rx_resources(adapter->rx_ring[i]); return err; } /** * ixgbevf_free_rx_resources - Free Rx Resources * @rx_ring: ring to clean the resources from * * Free all receive software resources **/ void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring) { ixgbevf_clean_rx_ring(rx_ring); rx_ring->xdp_prog = NULL; xdp_rxq_info_unreg(&rx_ring->xdp_rxq); vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } /** * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues * @adapter: board private structure * * Free all receive software resources **/ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) { int i; for (i = 0; i < adapter->num_rx_queues; i++) if (adapter->rx_ring[i]->desc) ixgbevf_free_rx_resources(adapter->rx_ring[i]); } /** * ixgbevf_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. **/ int ixgbevf_open(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; int err; /* A previous failure to open the device because of a lack of * available MSIX vector resources may have reset the number * of msix vectors variable to zero. The only way to recover * is to unload/reload the driver and hope that the system has * been able to recover some MSIX vector resources. */ if (!adapter->num_msix_vectors) return -ENOMEM; if (hw->adapter_stopped) { ixgbevf_reset(adapter); /* if adapter is still stopped then PF isn't up and * the VF can't start. */ if (hw->adapter_stopped) { err = IXGBE_ERR_MBX; pr_err("Unable to start - perhaps the PF Driver isn't up yet\n"); goto err_setup_reset; } } /* disallow open during test */ if (test_bit(__IXGBEVF_TESTING, &adapter->state)) return -EBUSY; netif_carrier_off(netdev); /* allocate transmit descriptors */ err = ixgbevf_setup_all_tx_resources(adapter); if (err) goto err_setup_tx; /* allocate receive descriptors */ err = ixgbevf_setup_all_rx_resources(adapter); if (err) goto err_setup_rx; ixgbevf_configure(adapter); err = ixgbevf_request_irq(adapter); if (err) goto err_req_irq; /* Notify the stack of the actual queue counts. 
*/ err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); if (err) goto err_set_queues; err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); if (err) goto err_set_queues; ixgbevf_up_complete(adapter); return 0; err_set_queues: ixgbevf_free_irq(adapter); err_req_irq: ixgbevf_free_all_rx_resources(adapter); err_setup_rx: ixgbevf_free_all_tx_resources(adapter); err_setup_tx: ixgbevf_reset(adapter); err_setup_reset: return err; } /** * ixgbevf_close_suspend - actions necessary to both suspend and close flows * @adapter: the private adapter struct * * This function should contain the necessary work common to both suspending * and closing of the device. */ static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter) { ixgbevf_down(adapter); ixgbevf_free_irq(adapter); ixgbevf_free_all_tx_resources(adapter); ixgbevf_free_all_rx_resources(adapter); } /** * ixgbevf_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the drivers control, but * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. **/ int ixgbevf_close(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); if (netif_device_present(netdev)) ixgbevf_close_suspend(adapter); return 0; } static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) { struct net_device *dev = adapter->netdev; if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state)) return; /* if interface is down do nothing */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || test_bit(__IXGBEVF_RESETTING, &adapter->state)) return; /* Hardware has to reinitialize queues and interrupts to * match packet buffer alignment. Unfortunately, the * hardware is not flexible enough to do this dynamically. */ rtnl_lock(); if (netif_running(dev)) ixgbevf_close(dev); ixgbevf_clear_interrupt_scheme(adapter); ixgbevf_init_interrupt_scheme(adapter); if (netif_running(dev)) ixgbevf_open(dev); rtnl_unlock(); } static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, u32 vlan_macip_lens, u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx) { struct ixgbe_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); i++; tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; /* set bits to identify this as an advanced context descriptor */ type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); context_desc->fceof_saidx = cpu_to_le32(fceof_saidx); context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); } static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, u8 *hdr_len, struct ixgbevf_ipsec_tx_data *itd) { u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = first->skb; union { struct iphdr *v4; struct ipv6hdr *v6; unsigned char *hdr; } ip; union { struct tcphdr *tcp; unsigned char *hdr; } l4; u32 paylen, l4_offset; u32 fceof_saidx = 0; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; if (!skb_is_gso(skb)) return 0; err = skb_cow_head(skb, 0); if (err < 0) return err; if (eth_p_mpls(first->protocol)) ip.hdr = skb_inner_network_header(skb); else ip.hdr = skb_network_header(skb); l4.hdr = skb_checksum_start(skb); /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; /* initialize outer IP header fields */ if (ip.v4->version == 4) { unsigned char *csum_start = skb_checksum_start(skb); unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); int len = csum_start - trans_start; /* IP header will have to cancel out any data that * is not a part of the outer IP header, so set to * a reverse csum if needed, else init check to 0. */ ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? csum_fold(csum_partial(trans_start, len, 0)) : 0; type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; ip.v4->tot_len = 0; first->tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM | IXGBE_TX_FLAGS_IPV4; } else { ip.v6->payload_len = 0; first->tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; } /* determine offset of inner transport header */ l4_offset = l4.hdr - skb->data; /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); /* update gso size and bytecount with header size */ first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; /* mss_l4len_id: use 1 as index for TSO */ mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT); fceof_saidx |= itd->pfsa; type_tucmd |= itd->flags | itd->trailer_len; /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = l4.hdr - ip.hdr; vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, mss_l4len_idx); return 1; } static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, struct ixgbevf_ipsec_tx_data *itd) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; u32 fceof_saidx = 0; u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) goto no_csum; switch (skb->csum_offset) { case offsetof(struct tcphdr, check): type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; fallthrough; case offsetof(struct udphdr, check): break; case offsetof(struct sctphdr, checksum): /* validate that this is actually an SCTP request */ if (skb_csum_is_sctp(skb)) { type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; break; } 
fallthrough; default: skb_checksum_help(skb); goto no_csum; } if (first->protocol == htons(ETH_P_IP)) type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; /* update TX checksum flag */ first->tx_flags |= IXGBE_TX_FLAGS_CSUM; vlan_macip_lens = skb_checksum_start_offset(skb) - skb_network_offset(skb); no_csum: /* vlan_macip_lens: MACLEN, VLAN tag */ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; fceof_saidx |= itd->pfsa; type_tucmd |= itd->flags | itd->trailer_len; ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0); } static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) { /* set type for advanced descriptor with frame checksum insertion */ __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); /* set HW VLAN bit if VLAN is present */ if (tx_flags & IXGBE_TX_FLAGS_VLAN) cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); /* set segmentation enable bits for TSO/FSO */ if (tx_flags & IXGBE_TX_FLAGS_TSO) cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE); return cmd_type; } static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, u32 tx_flags, unsigned int paylen) { __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT); /* enable L4 checksum for TSO and TX checksum offload */ if (tx_flags & IXGBE_TX_FLAGS_CSUM) olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM); /* enble IPv4 checksum for TSO */ if (tx_flags & IXGBE_TX_FLAGS_IPV4) olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); /* enable IPsec */ if (tx_flags & IXGBE_TX_FLAGS_IPSEC) olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC); /* use index 1 context for TSO/FSO/FCOE/IPSEC */ if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC)) olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT); /* Check Context must be set if Tx switch is enabled, which it * always is for case where virtual functions are running */ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC); tx_desc->read.olinfo_status = olinfo_status; } static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, const u8 hdr_len) { struct sk_buff *skb = first->skb; struct ixgbevf_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; skb_frag_t *frag; dma_addr_t dma; unsigned int data_len, size; u32 tx_flags = first->tx_flags; __le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags); u16 i = tx_ring->next_to_use; tx_desc = IXGBEVF_TX_DESC(tx_ring, i); ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); size = skb_headlen(skb); data_len = skb->data_len; dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); tx_buffer = first; for (frag = &skb_shinfo(skb)->frags[0];; frag++) { if (dma_mapping_error(tx_ring->dev, dma)) goto dma_error; /* record length, and DMA address */ dma_unmap_len_set(tx_buffer, len, size); dma_unmap_addr_set(tx_buffer, dma, dma); tx_desc->read.buffer_addr = cpu_to_le64(dma); while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD); i++; tx_desc++; if (i == tx_ring->count) { tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); i = 0; } tx_desc->read.olinfo_status = 0; dma += IXGBE_MAX_DATA_PER_TXD; size -= IXGBE_MAX_DATA_PER_TXD; tx_desc->read.buffer_addr = cpu_to_le64(dma); } if (likely(!data_len)) break; tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); i++; tx_desc++; if (i == tx_ring->count) { tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); i = 0; } tx_desc->read.olinfo_status = 0; size 
= skb_frag_size(frag); data_len -= size; dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, DMA_TO_DEVICE); tx_buffer = &tx_ring->tx_buffer_info[i]; } /* write last descriptor with RS and EOP bits */ cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD); tx_desc->read.cmd_type_len = cmd_type; /* set the timestamp */ first->time_stamp = jiffies; skb_tx_timestamp(skb); /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. (Only applicable for weak-ordered * memory model archs, such as IA-64). * * We also need this memory barrier (wmb) to make certain all of the * status bits have been updated before next_to_watch is written. */ wmb(); /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; i++; if (i == tx_ring->count) i = 0; tx_ring->next_to_use = i; /* notify HW of packet */ ixgbevf_write_tail(tx_ring, i); return; dma_error: dev_err(tx_ring->dev, "TX DMA map failed\n"); tx_buffer = &tx_ring->tx_buffer_info[i]; /* clear dma mappings for failed tx_buffer_info map */ while (tx_buffer != first) { if (dma_unmap_len(tx_buffer, len)) dma_unmap_page(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); dma_unmap_len_set(tx_buffer, len, 0); if (i-- == 0) i += tx_ring->count; tx_buffer = &tx_ring->tx_buffer_info[i]; } if (dma_unmap_len(tx_buffer, len)) dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); dma_unmap_len_set(tx_buffer, len, 0); dev_kfree_skb_any(tx_buffer->skb); tx_buffer->skb = NULL; tx_ring->next_to_use = i; } static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) { netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); /* Herbert's original patch had: * smp_mb__after_netif_stop_queue(); * but since that doesn't exist yet, just open code it. */ smp_mb(); /* We need to check again in a case another CPU has just * made room available. */ if (likely(ixgbevf_desc_unused(tx_ring) < size)) return -EBUSY; /* A reprieve! 
- use start_queue because it doesn't call schedule */ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; return 0; } static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) { if (likely(ixgbevf_desc_unused(tx_ring) >= size)) return 0; return __ixgbevf_maybe_stop_tx(tx_ring, size); } static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, struct ixgbevf_ring *tx_ring) { struct ixgbevf_tx_buffer *first; int tso; u32 tx_flags = 0; u16 count = TXD_USE_COUNT(skb_headlen(skb)); struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 }; #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD unsigned short f; #endif u8 hdr_len = 0; u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); if (!dst_mac || is_link_local_ether_addr(dst_mac)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, * + 1 desc for context descriptor, * otherwise try next time */ #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; count += TXD_USE_COUNT(skb_frag_size(frag)); } #else count += skb_shinfo(skb)->nr_frags; #endif if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; } /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; first->skb = skb; first->bytecount = skb->len; first->gso_segs = 1; if (skb_vlan_tag_present(skb)) { tx_flags |= skb_vlan_tag_get(skb); tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_VLAN; } /* record initial flags and protocol */ first->tx_flags = tx_flags; first->protocol = vlan_get_protocol(skb); #ifdef CONFIG_IXGBEVF_IPSEC if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) goto out_drop; #endif tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx); if (tso < 0) goto out_drop; else if (!tso) ixgbevf_tx_csum(tx_ring, first, &ipsec_tx); ixgbevf_tx_map(tx_ring, first, hdr_len); ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); return NETDEV_TX_OK; out_drop: dev_kfree_skb_any(first->skb); first->skb = NULL; return NETDEV_TX_OK; } static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_ring *tx_ring; if (skb->len <= 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } /* The minimum packet size for olinfo paylen is 17 so pad the skb * in order to meet this minimum size requirement. 
*/ if (skb->len < 17) { if (skb_padto(skb, 17)) return NETDEV_TX_OK; skb->len = 17; } tx_ring = adapter->tx_ring[skb->queue_mapping]; return ixgbevf_xmit_frame_ring(skb, tx_ring); } /** * ixgbevf_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure **/ static int ixgbevf_set_mac(struct net_device *netdev, void *p) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; struct sockaddr *addr = p; int err; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; spin_lock_bh(&adapter->mbx_lock); err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0); spin_unlock_bh(&adapter->mbx_lock); if (err) return -EPERM; ether_addr_copy(hw->mac.addr, addr->sa_data); ether_addr_copy(hw->mac.perm_addr, addr->sa_data); eth_hw_addr_set(netdev, addr->sa_data); return 0; } /** * ixgbevf_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure **/ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; int ret; /* prevent MTU being changed to a size unsupported by XDP */ if (adapter->xdp_prog) { dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n"); return -EPERM; } spin_lock_bh(&adapter->mbx_lock); /* notify the PF of our intent to use this size of frame */ ret = hw->mac.ops.set_rlpml(hw, max_frame); spin_unlock_bh(&adapter->mbx_lock); if (ret) return -EINVAL; hw_dbg(hw, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; if (netif_running(netdev)) ixgbevf_reinit_locked(adapter); return 0; } static int __maybe_unused ixgbevf_suspend(struct device *dev_d) { struct net_device *netdev = dev_get_drvdata(dev_d); struct ixgbevf_adapter *adapter = netdev_priv(netdev); rtnl_lock(); netif_device_detach(netdev); if (netif_running(netdev)) ixgbevf_close_suspend(adapter); ixgbevf_clear_interrupt_scheme(adapter); rtnl_unlock(); return 0; } static int __maybe_unused ixgbevf_resume(struct device *dev_d) { struct pci_dev *pdev = to_pci_dev(dev_d); struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); u32 err; adapter->hw.hw_addr = adapter->io_addr; smp_mb__before_atomic(); clear_bit(__IXGBEVF_DISABLED, &adapter->state); pci_set_master(pdev); ixgbevf_reset(adapter); rtnl_lock(); err = ixgbevf_init_interrupt_scheme(adapter); if (!err && netif_running(netdev)) err = ixgbevf_open(netdev); rtnl_unlock(); if (err) return err; netif_device_attach(netdev); return err; } static void ixgbevf_shutdown(struct pci_dev *pdev) { ixgbevf_suspend(&pdev->dev); } static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats, const struct ixgbevf_ring *ring) { u64 bytes, packets; unsigned int start; if (ring) { do { start = u64_stats_fetch_begin(&ring->syncp); bytes = ring->stats.bytes; packets = ring->stats.packets; } while (u64_stats_fetch_retry(&ring->syncp, start)); stats->tx_bytes += bytes; stats->tx_packets += packets; } } static void ixgbevf_get_stats(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); unsigned int start; u64 bytes, packets; const struct ixgbevf_ring 
*ring; int i; ixgbevf_update_stats(adapter); stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; rcu_read_lock(); for (i = 0; i < adapter->num_rx_queues; i++) { ring = adapter->rx_ring[i]; do { start = u64_stats_fetch_begin(&ring->syncp); bytes = ring->stats.bytes; packets = ring->stats.packets; } while (u64_stats_fetch_retry(&ring->syncp, start)); stats->rx_bytes += bytes; stats->rx_packets += packets; } for (i = 0; i < adapter->num_tx_queues; i++) { ring = adapter->tx_ring[i]; ixgbevf_get_tx_ring_stats(stats, ring); } for (i = 0; i < adapter->num_xdp_queues; i++) { ring = adapter->xdp_ring[i]; ixgbevf_get_tx_ring_stats(stats, ring); } rcu_read_unlock(); } #define IXGBEVF_MAX_MAC_HDR_LEN 127 #define IXGBEVF_MAX_NETWORK_HDR_LEN 511 static netdev_features_t ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { unsigned int network_hdr_len, mac_hdr_len; /* Make certain the headers can be described by a context descriptor */ mac_hdr_len = skb_network_header(skb) - skb->data; if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN)) return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_TSO | NETIF_F_TSO6); network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN)) return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | NETIF_F_TSO | NETIF_F_TSO6); /* We can only support IPV4 TSO in tunnels if we can mangle the * inner IP ID field, so strip TSO if MANGLEID is not supported. */ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) features &= ~NETIF_F_TSO; return features; } static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog) { int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; struct ixgbevf_adapter *adapter = netdev_priv(dev); struct bpf_prog *old_prog; /* verify ixgbevf ring attributes are sufficient for XDP */ for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbevf_ring *ring = adapter->rx_ring[i]; if (frame_size > ixgbevf_rx_bufsz(ring)) return -EINVAL; } old_prog = xchg(&adapter->xdp_prog, prog); /* If transitioning XDP modes reconfigure rings */ if (!!prog != !!old_prog) { /* Hardware has to reinitialize queues and interrupts to * match packet buffer alignment. Unfortunately, the * hardware is not flexible enough to do this dynamically. 
*/ if (netif_running(dev)) ixgbevf_close(dev); ixgbevf_clear_interrupt_scheme(adapter); ixgbevf_init_interrupt_scheme(adapter); if (netif_running(dev)) ixgbevf_open(dev); } else { for (i = 0; i < adapter->num_rx_queues; i++) xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); } if (old_prog) bpf_prog_put(old_prog); return 0; } static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return ixgbevf_xdp_setup(dev, xdp->prog); default: return -EINVAL; } } static const struct net_device_ops ixgbevf_netdev_ops = { .ndo_open = ixgbevf_open, .ndo_stop = ixgbevf_close, .ndo_start_xmit = ixgbevf_xmit_frame, .ndo_set_rx_mode = ixgbevf_set_rx_mode, .ndo_get_stats64 = ixgbevf_get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgbevf_set_mac, .ndo_change_mtu = ixgbevf_change_mtu, .ndo_tx_timeout = ixgbevf_tx_timeout, .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, .ndo_features_check = ixgbevf_features_check, .ndo_bpf = ixgbevf_xdp, }; static void ixgbevf_assign_netdev_ops(struct net_device *dev) { dev->netdev_ops = &ixgbevf_netdev_ops; ixgbevf_set_ethtool_ops(dev); dev->watchdog_timeo = 5 * HZ; } /** * ixgbevf_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in ixgbevf_pci_tbl * * Returns 0 on success, negative on failure * * ixgbevf_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. **/ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct ixgbevf_adapter *adapter = NULL; struct ixgbe_hw *hw = NULL; const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; bool disable_dev = false; int err; err = pci_enable_device(pdev); if (err) return err; err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_dma; } err = pci_request_regions(pdev, ixgbevf_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); goto err_pci_reg; } pci_set_master(pdev); netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), MAX_TX_QUEUES); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; } SET_NETDEV_DEV(netdev, &pdev->dev); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; hw = &adapter->hw; hw->back = adapter; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); /* call save state here in standalone driver because it relies on * adapter struct to exist, and needs to call netdev_priv */ pci_save_state(pdev); hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); adapter->io_addr = hw->hw_addr; if (!hw->hw_addr) { err = -EIO; goto err_ioremap; } ixgbevf_assign_netdev_ops(netdev); /* Setup HW API */ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); hw->mac.type = ii->mac; memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy, sizeof(struct ixgbe_mbx_operations)); /* setup the private structure */ err = ixgbevf_sw_init(adapter); if (err) goto err_sw_init; /* The HW MAC address was set and/or determined in sw_init */ if (!is_valid_ether_addr(netdev->dev_addr)) { pr_err("invalid MAC address\n"); err = -EIO; goto err_sw_init; } netdev->hw_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC; #define IXGBEVF_GSO_PARTIAL_FEATURES 
(NETIF_F_GSO_GRE | \ NETIF_F_GSO_GRE_CSUM | \ NETIF_F_GSO_IPXIP4 | \ NETIF_F_GSO_IPXIP6 | \ NETIF_F_GSO_UDP_TUNNEL | \ NETIF_F_GSO_UDP_TUNNEL_CSUM) netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES; netdev->hw_features |= NETIF_F_GSO_PARTIAL | IXGBEVF_GSO_PARTIAL_FEATURES; netdev->features = netdev->hw_features | NETIF_F_HIGHDMA; netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; netdev->mpls_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_CSUM; netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES; netdev->hw_enc_features |= netdev->vlan_features; /* set this bit last since it cannot be part of vlan_features */ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->xdp_features = NETDEV_XDP_ACT_BASIC; /* MTU range: 68 - 1504 or 9710 */ netdev->min_mtu = ETH_MIN_MTU; switch (adapter->hw.api_version) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: case ixgbe_mbox_api_15: netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); break; default: if (adapter->hw.mac.type != ixgbe_mac_82599_vf) netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); else netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN; break; } if (IXGBE_REMOVED(hw->hw_addr)) { err = -EIO; goto err_sw_init; } timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0); INIT_WORK(&adapter->service_task, ixgbevf_service_task); set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state); clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); err = ixgbevf_init_interrupt_scheme(adapter); if (err) goto err_sw_init; strcpy(netdev->name, "eth%d"); err = register_netdev(netdev); if (err) goto err_register; pci_set_drvdata(pdev, netdev); netif_carrier_off(netdev); ixgbevf_init_ipsec_offload(adapter); ixgbevf_init_last_counter_stats(adapter); /* print the VF info */ dev_info(&pdev->dev, "%pM\n", netdev->dev_addr); dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); switch (hw->mac.type) { case ixgbe_mac_X550_vf: dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n"); break; case ixgbe_mac_X540_vf: dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n"); break; case ixgbe_mac_82599_vf: default: dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n"); break; } return 0; err_register: ixgbevf_clear_interrupt_scheme(adapter); err_sw_init: ixgbevf_reset_interrupt_capability(adapter); iounmap(adapter->io_addr); kfree(adapter->rss_key); err_ioremap: disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: pci_release_regions(pdev); err_pci_reg: err_dma: if (!adapter || disable_dev) pci_disable_device(pdev); return err; } /** * ixgbevf_remove - Device Removal Routine * @pdev: PCI device information struct * * ixgbevf_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. The could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. 
**/ static void ixgbevf_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter; bool disable_dev; if (!netdev) return; adapter = netdev_priv(netdev); set_bit(__IXGBEVF_REMOVING, &adapter->state); cancel_work_sync(&adapter->service_task); if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); ixgbevf_stop_ipsec_offload(adapter); ixgbevf_clear_interrupt_scheme(adapter); ixgbevf_reset_interrupt_capability(adapter); iounmap(adapter->io_addr); pci_release_regions(pdev); hw_dbg(&adapter->hw, "Remove complete\n"); kfree(adapter->rss_key); disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); free_netdev(netdev); if (disable_dev) pci_disable_device(pdev); } /** * ixgbevf_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device * @state: The current pci connection state * * This function is called after a PCI bus error affecting * this device has been detected. **/ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) return PCI_ERS_RESULT_DISCONNECT; rtnl_lock(); netif_device_detach(netdev); if (netif_running(netdev)) ixgbevf_close_suspend(adapter); if (state == pci_channel_io_perm_failure) { rtnl_unlock(); return PCI_ERS_RESULT_DISCONNECT; } if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) pci_disable_device(pdev); rtnl_unlock(); /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } /** * ixgbevf_io_slot_reset - called after the pci bus has been reset. * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. Implementation * resembles the first-half of the ixgbevf_resume routine. **/ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); if (pci_enable_device_mem(pdev)) { dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } adapter->hw.hw_addr = adapter->io_addr; smp_mb__before_atomic(); clear_bit(__IXGBEVF_DISABLED, &adapter->state); pci_set_master(pdev); ixgbevf_reset(adapter); return PCI_ERS_RESULT_RECOVERED; } /** * ixgbevf_io_resume - called when traffic can start flowing again. * @pdev: Pointer to PCI device * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the * second-half of the ixgbevf_resume routine. 
**/ static void ixgbevf_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); rtnl_lock(); if (netif_running(netdev)) ixgbevf_open(netdev); netif_device_attach(netdev); rtnl_unlock(); } /* PCI Error Recovery (ERS) */ static const struct pci_error_handlers ixgbevf_err_handler = { .error_detected = ixgbevf_io_error_detected, .slot_reset = ixgbevf_io_slot_reset, .resume = ixgbevf_io_resume, }; static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume); static struct pci_driver ixgbevf_driver = { .name = ixgbevf_driver_name, .id_table = ixgbevf_pci_tbl, .probe = ixgbevf_probe, .remove = ixgbevf_remove, /* Power Management Hooks */ .driver.pm = &ixgbevf_pm_ops, .shutdown = ixgbevf_shutdown, .err_handler = &ixgbevf_err_handler }; /** * ixgbevf_init_module - Driver Registration Routine * * ixgbevf_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. **/ static int __init ixgbevf_init_module(void) { int err; pr_info("%s\n", ixgbevf_driver_string); pr_info("%s\n", ixgbevf_copyright); ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name); if (!ixgbevf_wq) { pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name); return -ENOMEM; } err = pci_register_driver(&ixgbevf_driver); if (err) { destroy_workqueue(ixgbevf_wq); return err; } return 0; } module_init(ixgbevf_init_module); /** * ixgbevf_exit_module - Driver Exit Cleanup Routine * * ixgbevf_exit_module is called just before the driver is removed * from memory. **/ static void __exit ixgbevf_exit_module(void) { pci_unregister_driver(&ixgbevf_driver); if (ixgbevf_wq) { destroy_workqueue(ixgbevf_wq); ixgbevf_wq = NULL; } } #ifdef DEBUG /** * ixgbevf_get_hw_dev_name - return device name string * used by hardware layer to print debugging information * @hw: pointer to private hardware struct **/ char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) { struct ixgbevf_adapter *adapter = hw->back; return adapter->netdev->name; } #endif module_exit(ixgbevf_exit_module); /* ixgbevf_main.c */
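/* Editor's note: the UPDATE_VF_COUNTER_32bit/36bit macros above extend
 * free-running 32-bit (or 36-bit) hardware counters into 64-bit software
 * totals by assuming at most one wrap between polls. The following is a
 * minimal, standalone sketch of that technique; the function name, the
 * sample readings and the printed output are illustrative only and are not
 * part of the driver.
 */
#include <stdint.h>
#include <stdio.h>

/* Fold a new raw reading of a 'width'-bit counter into a 64-bit total.
 * 'last' holds the previous raw reading, 'total' the accumulated value.
 */
static void extend_counter(uint64_t current, uint64_t *last,
			   uint64_t *total, unsigned int width)
{
	uint64_t span = 1ULL << width;	/* counter rolls over at 2^width */

	if (current < *last)		/* reading went backwards: one wrap */
		*total += span;
	*last = current;

	/* keep the accumulated high bits, splice in the fresh low bits */
	*total = (*total & ~(span - 1)) | current;
}

int main(void)
{
	uint64_t last = 0, total = 0;
	/* 32-bit samples that wrap once between the second and third poll */
	uint32_t samples[] = { 0xFFFFFF00u, 0xFFFFFFF0u, 0x00000010u, 0x00000200u };

	for (unsigned int i = 0; i < 4; i++) {
		extend_counter(samples[i], &last, &total, 32);
		printf("raw=0x%08x total=%llu\n", (unsigned int)samples[i],
		       (unsigned long long)total);
	}
	return 0;
}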
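/* Editor's note: ixgbevf_alloc_q_vectors() above spreads the Rx, Tx and XDP
 * rings over the available MSI-X vectors by recomputing
 * DIV_ROUND_UP(remaining rings, remaining vectors) for each vector in turn,
 * so earlier vectors take the larger shares and nothing is left unassigned.
 * A standalone sketch of that distribution arithmetic follows; the helper
 * name and the 7-rings/3-vectors example are illustrative, not driver code.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static void distribute(int rings, int vectors)
{
	int ring_idx = 0;

	for (int v = 0; vectors; v++, vectors--) {
		/* this vector's share, recomputed from what is still left */
		int share = DIV_ROUND_UP(rings, vectors);

		if (!share) {
			printf("vector %d gets no rings\n", v);
			continue;
		}
		printf("vector %d gets rings %d..%d\n",
		       v, ring_idx, ring_idx + share - 1);
		rings -= share;
		ring_idx += share;
	}
}

int main(void)
{
	distribute(7, 3);	/* 7 rings over 3 vectors -> 3 + 2 + 2 */
	return 0;
}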
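/* Editor's note: before queueing a frame, ixgbevf_xmit_frame_ring() above
 * estimates how many Tx descriptors the frame will consume (one per
 * MAX_DATA_PER_TXD-sized chunk of the linear part and of each fragment) and
 * then requires three extra free slots: one context descriptor plus a
 * two-descriptor gap so the tail never catches the head. The sketch below
 * mirrors that budgeting; the 16 KB per-descriptor limit, the helper names
 * and the sample lengths are assumptions for illustration, not values taken
 * from the driver headers.
 */
#include <stdio.h>

#define MAX_DATA_PER_TXD	(1u << 14)	/* assumed 16 KB per descriptor */
#define TXD_USE_COUNT(len)	(((len) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

static unsigned int descs_needed(unsigned int headlen,
				 const unsigned int *frag_len,
				 unsigned int nr_frags)
{
	unsigned int count = TXD_USE_COUNT(headlen);	/* linear data */

	for (unsigned int f = 0; f < nr_frags; f++)	/* each page fragment */
		count += TXD_USE_COUNT(frag_len[f]);

	/* +1 context descriptor, +2 gap between tail and head */
	return count + 3;
}

int main(void)
{
	unsigned int frags[] = { 32768, 1500 };	/* one large frag, one small */

	/* 256-byte linear part + the two fragments above -> 1 + 2 + 1 + 3 = 7 */
	printf("descriptors needed: %u\n", descs_needed(256, frags, 2));
	return 0;
}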
repo_name: linux-master
file_path: drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ /* ethtool support for ixgbevf */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/vmalloc.h> #include <linux/if_vlan.h> #include <linux/uaccess.h> #include "ixgbevf.h" enum {NETDEV_STATS, IXGBEVF_STATS}; struct ixgbe_stats { char stat_string[ETH_GSTRING_LEN]; int type; int sizeof_stat; int stat_offset; }; #define IXGBEVF_STAT(_name, _stat) { \ .stat_string = _name, \ .type = IXGBEVF_STATS, \ .sizeof_stat = sizeof_field(struct ixgbevf_adapter, _stat), \ .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \ } #define IXGBEVF_NETDEV_STAT(_net_stat) { \ .stat_string = #_net_stat, \ .type = NETDEV_STATS, \ .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \ .stat_offset = offsetof(struct net_device_stats, _net_stat) \ } static struct ixgbe_stats ixgbevf_gstrings_stats[] = { IXGBEVF_NETDEV_STAT(rx_packets), IXGBEVF_NETDEV_STAT(tx_packets), IXGBEVF_NETDEV_STAT(rx_bytes), IXGBEVF_NETDEV_STAT(tx_bytes), IXGBEVF_STAT("tx_busy", tx_busy), IXGBEVF_STAT("tx_restart_queue", restart_queue), IXGBEVF_STAT("tx_timeout_count", tx_timeout_count), IXGBEVF_NETDEV_STAT(multicast), IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error), IXGBEVF_STAT("alloc_rx_page", alloc_rx_page), IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed), IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), IXGBEVF_STAT("tx_ipsec", tx_ipsec), IXGBEVF_STAT("rx_ipsec", rx_ipsec), }; #define IXGBEVF_QUEUE_STATS_LEN ( \ (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \ ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_xdp_queues + \ ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \ (sizeof(struct ixgbevf_stats) / sizeof(u64))) #define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats) #define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN) static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Link test (on/offline)" }; #define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) static const char ixgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = { #define IXGBEVF_PRIV_FLAGS_LEGACY_RX BIT(0) "legacy-rx", }; #define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings) static int ixgbevf_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); ethtool_link_ksettings_zero_link_mode(cmd, supported); ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); cmd->base.autoneg = AUTONEG_DISABLE; cmd->base.port = -1; if (adapter->link_up) { __u32 speed = SPEED_10000; switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: speed = SPEED_10000; break; case IXGBE_LINK_SPEED_1GB_FULL: speed = SPEED_1000; break; case IXGBE_LINK_SPEED_100_FULL: speed = SPEED_100; break; } cmd->base.speed = speed; cmd->base.duplex = DUPLEX_FULL; } else { cmd->base.speed = SPEED_UNKNOWN; cmd->base.duplex = DUPLEX_UNKNOWN; } return 0; } static u32 ixgbevf_get_msglevel(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); return adapter->msg_enable; } static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); adapter->msg_enable = data; } static int 
ixgbevf_get_regs_len(struct net_device *netdev) { #define IXGBE_REGS_LEN 45 return IXGBE_REGS_LEN * sizeof(u32); } static void ixgbevf_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 *regs_buff = p; u32 regs_len = ixgbevf_get_regs_len(netdev); u8 i; memset(p, 0, regs_len); /* generate a number suitable for ethtool's register version */ regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id; /* General Registers */ regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL); regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS); regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS); regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP); regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER); /* Interrupt */ /* don't read EICR because it can clear interrupt causes, instead * read EICS which is a shadow but doesn't clear EICR */ regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS); regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS); regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS); regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC); regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC); regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM); regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0)); regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0)); regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); /* Receive DMA */ for (i = 0; i < 2; i++) regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i)); for (i = 0; i < 2; i++) regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i)); for (i = 0; i < 2; i++) regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i)); for (i = 0; i < 2; i++) regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i)); for (i = 0; i < 2; i++) regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i)); for (i = 0; i < 2; i++) regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); for (i = 0; i < 2; i++) regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i)); /* Receive */ regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE); /* Transmit */ for (i = 0; i < 2; i++) regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i)); for (i = 0; i < 2; i++) regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i)); for (i = 0; i < 2; i++) regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i)); for (i = 0; i < 2; i++) regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i)); for (i = 0; i < 2; i++) regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i)); for (i = 0; i < 2; i++) regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); for (i = 0; i < 2; i++) regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i)); for (i = 0; i < 2; i++) regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i)); } static void ixgbevf_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); strscpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver)); strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN; } static void ixgbevf_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); ring->rx_max_pending = IXGBEVF_MAX_RXD; ring->tx_max_pending = IXGBEVF_MAX_TXD; ring->rx_pending = adapter->rx_ring_count; ring->tx_pending = adapter->tx_ring_count; } static int ixgbevf_set_ringparam(struct net_device *netdev, struct 
ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL; u32 new_rx_count, new_tx_count; int i, j, err = 0; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD); new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD); new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD); new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD); new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); /* if nothing to do return success */ if ((new_tx_count == adapter->tx_ring_count) && (new_rx_count == adapter->rx_ring_count)) return 0; while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) usleep_range(1000, 2000); if (!netif_running(adapter->netdev)) { for (i = 0; i < adapter->num_tx_queues; i++) adapter->tx_ring[i]->count = new_tx_count; for (i = 0; i < adapter->num_xdp_queues; i++) adapter->xdp_ring[i]->count = new_tx_count; for (i = 0; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->count = new_rx_count; adapter->tx_ring_count = new_tx_count; adapter->xdp_ring_count = new_tx_count; adapter->rx_ring_count = new_rx_count; goto clear_reset; } if (new_tx_count != adapter->tx_ring_count) { tx_ring = vmalloc(array_size(sizeof(*tx_ring), adapter->num_tx_queues + adapter->num_xdp_queues)); if (!tx_ring) { err = -ENOMEM; goto clear_reset; } for (i = 0; i < adapter->num_tx_queues; i++) { /* clone ring and setup updated count */ tx_ring[i] = *adapter->tx_ring[i]; tx_ring[i].count = new_tx_count; err = ixgbevf_setup_tx_resources(&tx_ring[i]); if (err) { while (i) { i--; ixgbevf_free_tx_resources(&tx_ring[i]); } vfree(tx_ring); tx_ring = NULL; goto clear_reset; } } for (j = 0; j < adapter->num_xdp_queues; i++, j++) { /* clone ring and setup updated count */ tx_ring[i] = *adapter->xdp_ring[j]; tx_ring[i].count = new_tx_count; err = ixgbevf_setup_tx_resources(&tx_ring[i]); if (err) { while (i) { i--; ixgbevf_free_tx_resources(&tx_ring[i]); } vfree(tx_ring); tx_ring = NULL; goto clear_reset; } } } if (new_rx_count != adapter->rx_ring_count) { rx_ring = vmalloc(array_size(sizeof(*rx_ring), adapter->num_rx_queues)); if (!rx_ring) { err = -ENOMEM; goto clear_reset; } for (i = 0; i < adapter->num_rx_queues; i++) { /* clone ring and setup updated count */ rx_ring[i] = *adapter->rx_ring[i]; /* Clear copied XDP RX-queue info */ memset(&rx_ring[i].xdp_rxq, 0, sizeof(rx_ring[i].xdp_rxq)); rx_ring[i].count = new_rx_count; err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); if (err) { while (i) { i--; ixgbevf_free_rx_resources(&rx_ring[i]); } vfree(rx_ring); rx_ring = NULL; goto clear_reset; } } } /* bring interface down to prepare for update */ ixgbevf_down(adapter); /* Tx */ if (tx_ring) { for (i = 0; i < adapter->num_tx_queues; i++) { ixgbevf_free_tx_resources(adapter->tx_ring[i]); *adapter->tx_ring[i] = tx_ring[i]; } adapter->tx_ring_count = new_tx_count; for (j = 0; j < adapter->num_xdp_queues; i++, j++) { ixgbevf_free_tx_resources(adapter->xdp_ring[j]); *adapter->xdp_ring[j] = tx_ring[i]; } adapter->xdp_ring_count = new_tx_count; vfree(tx_ring); tx_ring = NULL; } /* Rx */ if (rx_ring) { for (i = 0; i < adapter->num_rx_queues; i++) { ixgbevf_free_rx_resources(adapter->rx_ring[i]); *adapter->rx_ring[i] = rx_ring[i]; } adapter->rx_ring_count = new_rx_count; vfree(rx_ring); rx_ring = 
NULL; } /* restore interface using new values */ ixgbevf_up(adapter); clear_reset: /* free Tx resources if Rx error is encountered */ if (tx_ring) { for (i = 0; i < adapter->num_tx_queues + adapter->num_xdp_queues; i++) ixgbevf_free_tx_resources(&tx_ring[i]); vfree(tx_ring); } clear_bit(__IXGBEVF_RESETTING, &adapter->state); return err; } static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset) { switch (stringset) { case ETH_SS_TEST: return IXGBEVF_TEST_LEN; case ETH_SS_STATS: return IXGBEVF_STATS_LEN; case ETH_SS_PRIV_FLAGS: return IXGBEVF_PRIV_FLAGS_STR_LEN; default: return -EINVAL; } } static void ixgbevf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *net_stats; unsigned int start; struct ixgbevf_ring *ring; int i, j; char *p; ixgbevf_update_stats(adapter); net_stats = dev_get_stats(netdev, &temp); for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) { switch (ixgbevf_gstrings_stats[i].type) { case NETDEV_STATS: p = (char *)net_stats + ixgbevf_gstrings_stats[i].stat_offset; break; case IXGBEVF_STATS: p = (char *)adapter + ixgbevf_gstrings_stats[i].stat_offset; break; default: data[i] = 0; continue; } data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } /* populate Tx queue data */ for (j = 0; j < adapter->num_tx_queues; j++) { ring = adapter->tx_ring[j]; if (!ring) { data[i++] = 0; data[i++] = 0; continue; } do { start = u64_stats_fetch_begin(&ring->syncp); data[i] = ring->stats.packets; data[i + 1] = ring->stats.bytes; } while (u64_stats_fetch_retry(&ring->syncp, start)); i += 2; } /* populate XDP queue data */ for (j = 0; j < adapter->num_xdp_queues; j++) { ring = adapter->xdp_ring[j]; if (!ring) { data[i++] = 0; data[i++] = 0; continue; } do { start = u64_stats_fetch_begin(&ring->syncp); data[i] = ring->stats.packets; data[i + 1] = ring->stats.bytes; } while (u64_stats_fetch_retry(&ring->syncp, start)); i += 2; } /* populate Rx queue data */ for (j = 0; j < adapter->num_rx_queues; j++) { ring = adapter->rx_ring[j]; if (!ring) { data[i++] = 0; data[i++] = 0; continue; } do { start = u64_stats_fetch_begin(&ring->syncp); data[i] = ring->stats.packets; data[i + 1] = ring->stats.bytes; } while (u64_stats_fetch_retry(&ring->syncp, start)); i += 2; } } static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); char *p = (char *)data; int i; switch (stringset) { case ETH_SS_TEST: memcpy(data, *ixgbe_gstrings_test, IXGBEVF_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_STATS: for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) { memcpy(p, ixgbevf_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } for (i = 0; i < adapter->num_tx_queues; i++) { sprintf(p, "tx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "tx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; } for (i = 0; i < adapter->num_xdp_queues; i++) { sprintf(p, "xdp_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "xdp_queue_%u_bytes", i); p += ETH_GSTRING_LEN; } for (i = 0; i < adapter->num_rx_queues; i++) { sprintf(p, "rx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "rx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; } break; case ETH_SS_PRIV_FLAGS: memcpy(data, ixgbevf_priv_flags_strings, IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); break; } } static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 
*data) { struct ixgbe_hw *hw = &adapter->hw; bool link_up; u32 link_speed = 0; *data = 0; hw->mac.ops.check_link(hw, &link_speed, &link_up, true); if (!link_up) *data = 1; return *data; } /* ethtool register test data */ struct ixgbevf_reg_test { u16 reg; u8 array_len; u8 test_type; u32 mask; u32 write; }; /* In the hardware, registers are laid out either singly, in arrays * spaced 0x40 bytes apart, or in contiguous tables. We assume * most tests take place on arrays or single registers (handled * as a single-element array) and special-case the tables. * Table tests are always pattern tests. * * We also make provision for some required setup steps by specifying * registers to be written without any read-back testing. */ #define PATTERN_TEST 1 #define SET_READ_TEST 2 #define WRITE_NO_TEST 3 #define TABLE32_TEST 4 #define TABLE64_TEST_LO 5 #define TABLE64_TEST_HI 6 /* default VF register test */ static const struct ixgbevf_reg_test reg_test_vf[] = { { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 }, { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, { .reg = 0 } }; static const u32 register_test_patterns[] = { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF }; static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data, int reg, u32 mask, u32 write) { u32 pat, val, before; if (IXGBE_REMOVED(adapter->hw.hw_addr)) { *data = 1; return true; } for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { before = ixgbevf_read_reg(&adapter->hw, reg); ixgbe_write_reg(&adapter->hw, reg, register_test_patterns[pat] & write); val = ixgbevf_read_reg(&adapter->hw, reg); if (val != (register_test_patterns[pat] & write & mask)) { hw_dbg(&adapter->hw, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", reg, val, register_test_patterns[pat] & write & mask); *data = reg; ixgbe_write_reg(&adapter->hw, reg, before); return true; } ixgbe_write_reg(&adapter->hw, reg, before); } return false; } static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data, int reg, u32 mask, u32 write) { u32 val, before; if (IXGBE_REMOVED(adapter->hw.hw_addr)) { *data = 1; return true; } before = ixgbevf_read_reg(&adapter->hw, reg); ixgbe_write_reg(&adapter->hw, reg, write & mask); val = ixgbevf_read_reg(&adapter->hw, reg); if ((write & mask) != (val & mask)) { pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg, (val & mask), write & mask); *data = reg; ixgbe_write_reg(&adapter->hw, reg, before); return true; } ixgbe_write_reg(&adapter->hw, reg, before); return false; } static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) { const struct ixgbevf_reg_test *test; u32 i; if (IXGBE_REMOVED(adapter->hw.hw_addr)) { dev_err(&adapter->pdev->dev, "Adapter removed - register test blocked\n"); *data = 1; return 1; } test = reg_test_vf; /* Perform the register test, looping through the test table * until we either fail or reach the null entry. 
*/ while (test->reg) { for (i = 0; i < test->array_len; i++) { bool b = false; switch (test->test_type) { case PATTERN_TEST: b = reg_pattern_test(adapter, data, test->reg + (i * 0x40), test->mask, test->write); break; case SET_READ_TEST: b = reg_set_and_check(adapter, data, test->reg + (i * 0x40), test->mask, test->write); break; case WRITE_NO_TEST: ixgbe_write_reg(&adapter->hw, test->reg + (i * 0x40), test->write); break; case TABLE32_TEST: b = reg_pattern_test(adapter, data, test->reg + (i * 4), test->mask, test->write); break; case TABLE64_TEST_LO: b = reg_pattern_test(adapter, data, test->reg + (i * 8), test->mask, test->write); break; case TABLE64_TEST_HI: b = reg_pattern_test(adapter, data, test->reg + 4 + (i * 8), test->mask, test->write); break; } if (b) return 1; } test++; } *data = 0; return *data; } static void ixgbevf_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); bool if_running = netif_running(netdev); if (IXGBE_REMOVED(adapter->hw.hw_addr)) { dev_err(&adapter->pdev->dev, "Adapter removed - test blocked\n"); data[0] = 1; data[1] = 1; eth_test->flags |= ETH_TEST_FL_FAILED; return; } set_bit(__IXGBEVF_TESTING, &adapter->state); if (eth_test->flags == ETH_TEST_FL_OFFLINE) { /* Offline tests */ hw_dbg(&adapter->hw, "offline testing starting\n"); /* Link test performed before hardware reset so autoneg doesn't * interfere with test result */ if (ixgbevf_link_test(adapter, &data[1])) eth_test->flags |= ETH_TEST_FL_FAILED; if (if_running) /* indicate we're in test mode */ ixgbevf_close(netdev); else ixgbevf_reset(adapter); hw_dbg(&adapter->hw, "register testing starting\n"); if (ixgbevf_reg_test(adapter, &data[0])) eth_test->flags |= ETH_TEST_FL_FAILED; ixgbevf_reset(adapter); clear_bit(__IXGBEVF_TESTING, &adapter->state); if (if_running) ixgbevf_open(netdev); } else { hw_dbg(&adapter->hw, "online testing starting\n"); /* Online tests */ if (ixgbevf_link_test(adapter, &data[1])) eth_test->flags |= ETH_TEST_FL_FAILED; /* Online tests aren't run; pass by default */ data[0] = 0; clear_bit(__IXGBEVF_TESTING, &adapter->state); } msleep_interruptible(4 * 1000); } static int ixgbevf_nway_reset(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) ixgbevf_reinit_locked(adapter); return 0; } static int ixgbevf_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); /* only valid if in constant ITR mode */ if (adapter->rx_itr_setting <= 1) ec->rx_coalesce_usecs = adapter->rx_itr_setting; else ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; /* if in mixed Tx/Rx queues per vector mode, report only Rx settings */ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) return 0; /* only valid if in constant ITR mode */ if (adapter->tx_itr_setting <= 1) ec->tx_coalesce_usecs = adapter->tx_itr_setting; else ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; return 0; } static int ixgbevf_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_q_vector *q_vector; int num_vectors, i; u16 tx_itr_param, rx_itr_param; /* don't accept Tx specific changes if we've got mixed RxTx vectors */ if (adapter->q_vector[0]->tx.count && 
adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs) return -EINVAL; if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) return -EINVAL; if (ec->rx_coalesce_usecs > 1) adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; else adapter->rx_itr_setting = ec->rx_coalesce_usecs; if (adapter->rx_itr_setting == 1) rx_itr_param = IXGBE_20K_ITR; else rx_itr_param = adapter->rx_itr_setting; if (ec->tx_coalesce_usecs > 1) adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; else adapter->tx_itr_setting = ec->tx_coalesce_usecs; if (adapter->tx_itr_setting == 1) tx_itr_param = IXGBE_12K_ITR; else tx_itr_param = adapter->tx_itr_setting; num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; for (i = 0; i < num_vectors; i++) { q_vector = adapter->q_vector[i]; if (q_vector->tx.count && !q_vector->rx.count) /* Tx only */ q_vector->itr = tx_itr_param; else /* Rx only or mixed */ q_vector->itr = rx_itr_param; ixgbevf_write_eitr(q_vector); } return 0; } static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rules __always_unused) { struct ixgbevf_adapter *adapter = netdev_priv(dev); switch (info->cmd) { case ETHTOOL_GRXRINGS: info->data = adapter->num_rx_queues; return 0; default: hw_dbg(&adapter->hw, "Command parameters not supported\n"); return -EOPNOTSUPP; } } static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) return IXGBEVF_X550_VFRETA_SIZE; return IXGBEVF_82599_RETA_SIZE; } static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev) { return IXGBEVF_RSS_HASH_KEY_SIZE; } static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); int err = 0; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) { if (key) memcpy(key, adapter->rss_key, ixgbevf_get_rxfh_key_size(netdev)); if (indir) { int i; for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++) indir[i] = adapter->rss_indir_tbl[i]; } } else { /* If neither indirection table nor hash key was requested * - just return a success avoiding taking any locks. 
*/ if (!indir && !key) return 0; spin_lock_bh(&adapter->mbx_lock); if (indir) err = ixgbevf_get_reta_locked(&adapter->hw, indir, adapter->num_rx_queues); if (!err && key) err = ixgbevf_get_rss_key_locked(&adapter->hw, key); spin_unlock_bh(&adapter->mbx_lock); } return err; } static u32 ixgbevf_get_priv_flags(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); u32 priv_flags = 0; if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX) priv_flags |= IXGBEVF_PRIV_FLAGS_LEGACY_RX; return priv_flags; } static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); unsigned int flags = adapter->flags; flags &= ~IXGBEVF_FLAGS_LEGACY_RX; if (priv_flags & IXGBEVF_PRIV_FLAGS_LEGACY_RX) flags |= IXGBEVF_FLAGS_LEGACY_RX; if (flags != adapter->flags) { adapter->flags = flags; /* reset interface to repopulate queues */ if (netif_running(netdev)) ixgbevf_reinit_locked(adapter); } return 0; } static const struct ethtool_ops ixgbevf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .get_drvinfo = ixgbevf_get_drvinfo, .get_regs_len = ixgbevf_get_regs_len, .get_regs = ixgbevf_get_regs, .nway_reset = ixgbevf_nway_reset, .get_link = ethtool_op_get_link, .get_ringparam = ixgbevf_get_ringparam, .set_ringparam = ixgbevf_set_ringparam, .get_msglevel = ixgbevf_get_msglevel, .set_msglevel = ixgbevf_set_msglevel, .self_test = ixgbevf_diag_test, .get_sset_count = ixgbevf_get_sset_count, .get_strings = ixgbevf_get_strings, .get_ethtool_stats = ixgbevf_get_ethtool_stats, .get_coalesce = ixgbevf_get_coalesce, .set_coalesce = ixgbevf_set_coalesce, .get_rxnfc = ixgbevf_get_rxnfc, .get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size, .get_rxfh_key_size = ixgbevf_get_rxfh_key_size, .get_rxfh = ixgbevf_get_rxfh, .get_link_ksettings = ixgbevf_get_link_ksettings, .get_priv_flags = ixgbevf_get_priv_flags, .set_priv_flags = ixgbevf_set_priv_flags, }; void ixgbevf_set_ethtool_ops(struct net_device *netdev) { netdev->ethtool_ops = &ixgbevf_ethtool_ops; }
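/*
 * Editor's note: illustrative, stand-alone sketch (not part of the driver
 * above) of the usecs <-> itr_setting conversion visible in
 * ixgbevf_get_coalesce() and ixgbevf_set_coalesce(): requests of 0 or 1 are
 * stored as-is (1 is later programmed as the fixed IXGBE_20K_ITR/12K_ITR
 * defaults), anything larger is stored shifted left by two bits and
 * reported back shifted right by two bits.
 */
#include <stdio.h>

static unsigned int itr_setting_from_usecs(unsigned int usecs)
{
	return usecs > 1 ? usecs << 2 : usecs;
}

static unsigned int usecs_from_itr_setting(unsigned int setting)
{
	return setting <= 1 ? setting : setting >> 2;
}

int main(void)
{
	unsigned int setting = itr_setting_from_usecs(50);

	/* prints "setting=200 reported=50" */
	printf("setting=%u reported=%u\n", setting,
	       usecs_from_itr_setting(setting));
	return 0;
}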
linux-master
drivers/net/ethernet/intel/ixgbevf/ethtool.c
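/*
 * Editor's note: stand-alone sketch of the descriptor-count clamping done at
 * the top of ixgbevf_set_ringparam() in the ethtool.c code above.  The
 * MIN/MAX/MULTIPLE values are illustrative stand-ins rather than the
 * driver's real constants, and ALIGN_UP() is a local copy of the usual
 * round-up-to-a-power-of-two helper.
 */
#include <stdio.h>

#define EXAMPLE_MIN_TXD		64u	/* assumed lower bound */
#define EXAMPLE_MAX_TXD		4096u	/* assumed upper bound */
#define EXAMPLE_TXD_MULTIPLE	8u	/* assumed descriptor granularity */

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned int clamp_tx_count(unsigned int requested)
{
	unsigned int count = requested;

	if (count < EXAMPLE_MIN_TXD)
		count = EXAMPLE_MIN_TXD;
	if (count > EXAMPLE_MAX_TXD)
		count = EXAMPLE_MAX_TXD;

	/* round up so the ring holds a whole number of descriptor blocks */
	return ALIGN_UP(count, EXAMPLE_TXD_MULTIPLE);
}

int main(void)
{
	/* prints "104 64 4096" */
	printf("%u %u %u\n", clamp_tx_count(100), clamp_tx_count(10),
	       clamp_tx_count(100000));
	return 0;
}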
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */ #include "ixgbevf.h" #include <net/xfrm.h> #include <crypto/aead.h> #define IXGBE_IPSEC_KEY_BITS 160 static const char aes_gcm_name[] = "rfc4106(gcm(aes))"; /** * ixgbevf_ipsec_set_pf_sa - ask the PF to set up an SA * @adapter: board private structure * @xs: xfrm info to be sent to the PF * * Returns: positive offload handle from the PF, or negative error code **/ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter, struct xfrm_state *xs) { u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 }; struct ixgbe_hw *hw = &adapter->hw; struct sa_mbx_msg *sam; int ret; /* send the important bits to the PF */ sam = (struct sa_mbx_msg *)(&msgbuf[1]); sam->dir = xs->xso.dir; sam->spi = xs->id.spi; sam->proto = xs->id.proto; sam->family = xs->props.family; if (xs->props.family == AF_INET6) memcpy(sam->addr, &xs->id.daddr.a6, sizeof(xs->id.daddr.a6)); else memcpy(sam->addr, &xs->id.daddr.a4, sizeof(xs->id.daddr.a4)); memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key)); msgbuf[0] = IXGBE_VF_IPSEC_ADD; spin_lock_bh(&adapter->mbx_lock); ret = ixgbevf_write_mbx(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); if (ret) goto out; ret = ixgbevf_poll_mbx(hw, msgbuf, 2); if (ret) goto out; ret = (int)msgbuf[1]; if (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE && ret >= 0) ret = -1; out: spin_unlock_bh(&adapter->mbx_lock); return ret; } /** * ixgbevf_ipsec_del_pf_sa - ask the PF to delete an SA * @adapter: board private structure * @pfsa: sa index returned from PF when created, -1 for all * * Returns: 0 on success, or negative error code **/ static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa) { struct ixgbe_hw *hw = &adapter->hw; u32 msgbuf[2]; int err; memset(msgbuf, 0, sizeof(msgbuf)); msgbuf[0] = IXGBE_VF_IPSEC_DEL; msgbuf[1] = (u32)pfsa; spin_lock_bh(&adapter->mbx_lock); err = ixgbevf_write_mbx(hw, msgbuf, 2); if (err) goto out; err = ixgbevf_poll_mbx(hw, msgbuf, 2); if (err) goto out; out: spin_unlock_bh(&adapter->mbx_lock); return err; } /** * ixgbevf_ipsec_restore - restore the IPsec HW settings after a reset * @adapter: board private structure * * Reload the HW tables from the SW tables after they've been bashed * by a chip reset. While we're here, make sure any stale VF data is * removed, since we go through reset when num_vfs changes. 
**/ void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter) { struct ixgbevf_ipsec *ipsec = adapter->ipsec; struct net_device *netdev = adapter->netdev; int i; if (!(adapter->netdev->features & NETIF_F_HW_ESP)) return; /* reload the Rx and Tx keys */ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { struct rx_sa *r = &ipsec->rx_tbl[i]; struct tx_sa *t = &ipsec->tx_tbl[i]; int ret; if (r->used) { ret = ixgbevf_ipsec_set_pf_sa(adapter, r->xs); if (ret < 0) netdev_err(netdev, "reload rx_tbl[%d] failed = %d\n", i, ret); } if (t->used) { ret = ixgbevf_ipsec_set_pf_sa(adapter, t->xs); if (ret < 0) netdev_err(netdev, "reload tx_tbl[%d] failed = %d\n", i, ret); } } } /** * ixgbevf_ipsec_find_empty_idx - find the first unused security parameter index * @ipsec: pointer to IPsec struct * @rxtable: true if we need to look in the Rx table * * Returns the first unused index in either the Rx or Tx SA table **/ static int ixgbevf_ipsec_find_empty_idx(struct ixgbevf_ipsec *ipsec, bool rxtable) { u32 i; if (rxtable) { if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT) return -ENOSPC; /* search rx sa table */ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { if (!ipsec->rx_tbl[i].used) return i; } } else { if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT) return -ENOSPC; /* search tx sa table */ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { if (!ipsec->tx_tbl[i].used) return i; } } return -ENOSPC; } /** * ixgbevf_ipsec_find_rx_state - find the state that matches * @ipsec: pointer to IPsec struct * @daddr: inbound address to match * @proto: protocol to match * @spi: SPI to match * @ip4: true if using an IPv4 address * * Returns a pointer to the matching SA state information **/ static struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec, __be32 *daddr, u8 proto, __be32 spi, bool ip4) { struct xfrm_state *ret = NULL; struct rx_sa *rsa; rcu_read_lock(); hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, (__force u32)spi) { if (spi == rsa->xs->id.spi && ((ip4 && *daddr == rsa->xs->id.daddr.a4) || (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6, sizeof(rsa->xs->id.daddr.a6)))) && proto == rsa->xs->id.proto) { ret = rsa->xs; xfrm_state_hold(ret); break; } } rcu_read_unlock(); return ret; } /** * ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol * @xs: pointer to xfrm_state struct * @mykey: pointer to key array to populate * @mysalt: pointer to salt value to populate * * This copies the protocol keys and salt to our own data tables. The * 82599 family only supports the one algorithm. **/ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { struct net_device *dev = xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; if (!xs->aead) { netdev_err(dev, "Unsupported IPsec algorithm\n"); return -EINVAL; } if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) { netdev_err(dev, "IPsec offload requires %d bit authentication\n", IXGBE_IPSEC_AUTH_BITS); return -EINVAL; } key_data = &xs->aead->alg_key[0]; key_len = xs->aead->alg_key_len; alg_name = xs->aead->alg_name; if (strcmp(alg_name, aes_gcm_name)) { netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n", aes_gcm_name); return -EINVAL; } /* The key bytes come down in a big endian array of bytes, so * we don't need to do any byte swapping. 
* 160 accounts for 16 byte key and 4 byte salt */ if (key_len > IXGBE_IPSEC_KEY_BITS) { *mysalt = ((u32 *)key_data)[4]; } else if (key_len == IXGBE_IPSEC_KEY_BITS) { *mysalt = 0; } else { netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n"); return -EINVAL; } memcpy(mykey, key_data, 16); return 0; } /** * ixgbevf_ipsec_add_sa - program device with a security association * @xs: pointer to transformer state struct * @extack: extack point to fill failure reason **/ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs, struct netlink_ext_ack *extack) { struct net_device *dev = xs->xso.real_dev; struct ixgbevf_adapter *adapter; struct ixgbevf_ipsec *ipsec; u16 sa_idx; int ret; adapter = netdev_priv(dev); ipsec = adapter->ipsec; if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for IPsec offload"); return -EINVAL; } if (xs->props.mode != XFRM_MODE_TRANSPORT) { NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for ipsec offload"); return -EINVAL; } if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) { NL_SET_ERR_MSG_MOD(extack, "Unsupported ipsec offload type"); return -EINVAL; } if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { struct rx_sa rsa; if (xs->calg) { NL_SET_ERR_MSG_MOD(extack, "Compression offload not supported"); return -EINVAL; } /* find the first unused index */ ret = ixgbevf_ipsec_find_empty_idx(ipsec, true); if (ret < 0) { NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx table!"); return ret; } sa_idx = (u16)ret; memset(&rsa, 0, sizeof(rsa)); rsa.used = true; rsa.xs = xs; if (rsa.xs->id.proto & IPPROTO_ESP) rsa.decrypt = xs->ealg || xs->aead; /* get the key and salt */ ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table"); return ret; } /* get ip for rx sa table */ if (xs->props.family == AF_INET6) memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16); else memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4); rsa.mode = IXGBE_RXMOD_VALID; if (rsa.xs->id.proto & IPPROTO_ESP) rsa.mode |= IXGBE_RXMOD_PROTO_ESP; if (rsa.decrypt) rsa.mode |= IXGBE_RXMOD_DECRYPT; if (rsa.xs->props.family == AF_INET6) rsa.mode |= IXGBE_RXMOD_IPV6; ret = ixgbevf_ipsec_set_pf_sa(adapter, xs); if (ret < 0) return ret; rsa.pfsa = ret; /* the preparations worked, so save the info */ memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa)); xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX; ipsec->num_rx_sa++; /* hash the new entry for faster search in Rx path */ hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist, (__force u32)rsa.xs->id.spi); } else { struct tx_sa tsa; /* find the first unused index */ ret = ixgbevf_ipsec_find_empty_idx(ipsec, false); if (ret < 0) { NL_SET_ERR_MSG_MOD(extack, "No space for SA in Tx table"); return ret; } sa_idx = (u16)ret; memset(&tsa, 0, sizeof(tsa)); tsa.used = true; tsa.xs = xs; if (xs->id.proto & IPPROTO_ESP) tsa.encrypt = xs->ealg || xs->aead; ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table"); memset(&tsa, 0, sizeof(tsa)); return ret; } ret = ixgbevf_ipsec_set_pf_sa(adapter, xs); if (ret < 0) return ret; tsa.pfsa = ret; /* the preparations worked, so save the info */ memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa)); xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX; ipsec->num_tx_sa++; } return 0; } /** * ixgbevf_ipsec_del_sa - clear out this specific SA * @xs: pointer to transformer state struct **/ static 
void ixgbevf_ipsec_del_sa(struct xfrm_state *xs) { struct net_device *dev = xs->xso.real_dev; struct ixgbevf_adapter *adapter; struct ixgbevf_ipsec *ipsec; u16 sa_idx; adapter = netdev_priv(dev); ipsec = adapter->ipsec; if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX; if (!ipsec->rx_tbl[sa_idx].used) { netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n", sa_idx, xs->xso.offload_handle); return; } ixgbevf_ipsec_del_pf_sa(adapter, ipsec->rx_tbl[sa_idx].pfsa); hash_del_rcu(&ipsec->rx_tbl[sa_idx].hlist); memset(&ipsec->rx_tbl[sa_idx], 0, sizeof(struct rx_sa)); ipsec->num_rx_sa--; } else { sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; if (!ipsec->tx_tbl[sa_idx].used) { netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n", sa_idx, xs->xso.offload_handle); return; } ixgbevf_ipsec_del_pf_sa(adapter, ipsec->tx_tbl[sa_idx].pfsa); memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa)); ipsec->num_tx_sa--; } } /** * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload * @skb: current data packet * @xs: pointer to transformer state struct **/ static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) { if (xs->props.family == AF_INET) { /* Offload with IPv4 options is not supported yet */ if (ip_hdr(skb)->ihl != 5) return false; } else { /* Offload with IPv6 extension headers is not support yet */ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) return false; } return true; } static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = { .xdo_dev_state_add = ixgbevf_ipsec_add_sa, .xdo_dev_state_delete = ixgbevf_ipsec_del_sa, .xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok, }; /** * ixgbevf_ipsec_tx - setup Tx flags for IPsec offload * @tx_ring: outgoing context * @first: current data packet * @itd: ipsec Tx data for later use in building context descriptor **/ int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, struct ixgbevf_ipsec_tx_data *itd) { struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); struct ixgbevf_ipsec *ipsec = adapter->ipsec; struct xfrm_state *xs; struct sec_path *sp; struct tx_sa *tsa; u16 sa_idx; sp = skb_sec_path(first->skb); if (unlikely(!sp->len)) { netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n", __func__, sp->len); return 0; } xs = xfrm_input_state(first->skb); if (unlikely(!xs)) { netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n", __func__, xs); return 0; } sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) { netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n", __func__, sa_idx, xs->xso.offload_handle); return 0; } tsa = &ipsec->tx_tbl[sa_idx]; if (unlikely(!tsa->used)) { netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n", __func__, sa_idx); return 0; } itd->pfsa = tsa->pfsa - IXGBE_IPSEC_BASE_TX_INDEX; first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CSUM; if (xs->id.proto == IPPROTO_ESP) { itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP | IXGBE_ADVTXD_TUCMD_L4T_TCP; if (first->protocol == htons(ETH_P_IP)) itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4; /* The actual trailer length is authlen (16 bytes) plus * 2 bytes for the proto and the padlen values, plus * padlen bytes of padding. This ends up not the same * as the static value found in xs->props.trailer_len (21). * * ... but if we're doing GSO, don't bother as the stack * doesn't add a trailer for those. 
*/ if (!skb_is_gso(first->skb)) { /* The "correct" way to get the auth length would be * to use * authlen = crypto_aead_authsize(xs->data); * but since we know we only have one size to worry * about * we can let the compiler use the constant * and save us a few CPU cycles. */ const int authlen = IXGBE_IPSEC_AUTH_BITS / 8; struct sk_buff *skb = first->skb; u8 padlen; int ret; ret = skb_copy_bits(skb, skb->len - (authlen + 2), &padlen, 1); if (unlikely(ret)) return 0; itd->trailer_len = authlen + 2 + padlen; } } if (tsa->encrypt) itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN; return 1; } /** * ixgbevf_ipsec_rx - decode IPsec bits from Rx descriptor * @rx_ring: receiving ring * @rx_desc: receive data descriptor * @skb: current data packet * * Determine if there was an IPsec encapsulation noticed, and if so set up * the resulting status for later in the receive stack. **/ void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { struct ixgbevf_adapter *adapter = netdev_priv(rx_ring->netdev); __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; __le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH | IXGBE_RXDADV_PKTTYPE_IPSEC_ESP); struct ixgbevf_ipsec *ipsec = adapter->ipsec; struct xfrm_offload *xo = NULL; struct xfrm_state *xs = NULL; struct ipv6hdr *ip6 = NULL; struct iphdr *ip4 = NULL; struct sec_path *sp; void *daddr; __be32 spi; u8 *c_hdr; u8 proto; /* Find the IP and crypto headers in the data. * We can assume no VLAN header in the way, b/c the * hw won't recognize the IPsec packet and anyway the * currently VLAN device doesn't support xfrm offload. */ if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) { ip4 = (struct iphdr *)(skb->data + ETH_HLEN); daddr = &ip4->daddr; c_hdr = (u8 *)ip4 + ip4->ihl * 4; } else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) { ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN); daddr = &ip6->daddr; c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr); } else { return; } switch (pkt_info & ipsec_pkt_types) { case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH): spi = ((struct ip_auth_hdr *)c_hdr)->spi; proto = IPPROTO_AH; break; case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP): spi = ((struct ip_esp_hdr *)c_hdr)->spi; proto = IPPROTO_ESP; break; default: return; } xs = ixgbevf_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4); if (unlikely(!xs)) return; sp = secpath_set(skb); if (unlikely(!sp)) return; sp->xvec[sp->len++] = xs; sp->olen++; xo = xfrm_offload(skb); xo->flags = CRYPTO_DONE; xo->status = CRYPTO_SUCCESS; adapter->rx_ipsec++; } /** * ixgbevf_init_ipsec_offload - initialize registers for IPsec operation * @adapter: board private structure **/ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter) { struct ixgbevf_ipsec *ipsec; size_t size; switch (adapter->hw.api_version) { case ixgbe_mbox_api_14: case ixgbe_mbox_api_15: break; default: return; } ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL); if (!ipsec) goto err1; hash_init(ipsec->rx_sa_list); size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; ipsec->rx_tbl = kzalloc(size, GFP_KERNEL); if (!ipsec->rx_tbl) goto err2; size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; ipsec->tx_tbl = kzalloc(size, GFP_KERNEL); if (!ipsec->tx_tbl) goto err2; ipsec->num_rx_sa = 0; ipsec->num_tx_sa = 0; adapter->ipsec = ipsec; adapter->netdev->xfrmdev_ops = &ixgbevf_xfrmdev_ops; #define IXGBEVF_ESP_FEATURES (NETIF_F_HW_ESP | \ NETIF_F_HW_ESP_TX_CSUM | \ NETIF_F_GSO_ESP) adapter->netdev->features |= IXGBEVF_ESP_FEATURES; 
adapter->netdev->hw_enc_features |= IXGBEVF_ESP_FEATURES; return; err2: kfree(ipsec->rx_tbl); kfree(ipsec->tx_tbl); kfree(ipsec); err1: netdev_err(adapter->netdev, "Unable to allocate memory for SA tables"); } /** * ixgbevf_stop_ipsec_offload - tear down the IPsec offload * @adapter: board private structure **/ void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter) { struct ixgbevf_ipsec *ipsec = adapter->ipsec; adapter->ipsec = NULL; if (ipsec) { kfree(ipsec->rx_tbl); kfree(ipsec->tx_tbl); kfree(ipsec); } }
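/*
 * Editor's note: stand-alone sketch (not driver code) of the ESP trailer
 * arithmetic in ixgbevf_ipsec_tx() above.  The trailer is the padding, one
 * pad-length byte, one next-header byte and the 16-byte ICV, so the driver
 * reads the pad-length byte sitting (authlen + 2) bytes from the end of the
 * frame and sums the pieces.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define EXAMPLE_AUTHLEN	16	/* ICV bytes for rfc4106(gcm(aes)), per the comment above */

static unsigned int esp_trailer_len(const uint8_t *frame, size_t frame_len)
{
	uint8_t padlen = frame[frame_len - (EXAMPLE_AUTHLEN + 2)];

	return EXAMPLE_AUTHLEN + 2 + padlen;
}

int main(void)
{
	uint8_t frame[64];
	size_t len = sizeof(frame);

	memset(frame, 0, len);
	frame[len - (EXAMPLE_AUTHLEN + 2)] = 2;	/* pretend two bytes of padding */

	/* prints "trailer length = 20" */
	printf("trailer length = %u\n", esp_trailer_len(frame, len));
	return 0;
}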
linux-master
drivers/net/ethernet/intel/ixgbevf/ipsec.c
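/*
 * Editor's note: minimal sketch of the key/salt split performed by
 * ixgbevf_ipsec_parse_proto_keys() in the ipsec.c code above for
 * rfc4106(gcm(aes)): the first 16 bytes of the xfrm key material are the
 * AES key, key material longer than 160 bits supplies the following 32-bit
 * word as the salt, exactly 160 bits means no salt, and anything shorter is
 * rejected.  memcpy() is used here instead of the driver's u32 cast purely
 * to keep this portable example free of unaligned accesses.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define KEY_BITS_BASE	160	/* 16-byte key plus 4-byte salt, as in the driver */

static int split_key_and_salt(const uint8_t *key_data, int key_len_bits,
			      uint8_t key[16], uint32_t *salt)
{
	if (key_len_bits > KEY_BITS_BASE)
		memcpy(salt, key_data + 16, sizeof(*salt));
	else if (key_len_bits == KEY_BITS_BASE)
		*salt = 0;
	else
		return -1;	/* too short for this offload */

	memcpy(key, key_data, 16);
	return 0;
}

int main(void)
{
	uint8_t material[24];	/* 192 bits of example key material */
	uint8_t key[16];
	uint32_t salt;
	int i;

	for (i = 0; i < (int)sizeof(material); i++)
		material[i] = (uint8_t)i;

	if (!split_key_and_salt(material, 8 * (int)sizeof(material), key, &salt))
		printf("salt holds bytes 16..19 -> 0x%08x\n", salt);
	return 0;
}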
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "vf.h" #include "ixgbevf.h" /* On Hyper-V, to reset, we need to read from this offset * from the PCI config space. This is the mechanism used on * Hyper-V to support PF/VF communication. */ #define IXGBE_HV_RESET_OFFSET 0x201 static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg, u32 *retmsg, u16 size) { s32 retval = ixgbevf_write_mbx(hw, msg, size); if (retval) return retval; return ixgbevf_poll_mbx(hw, retmsg, size); } /** * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure * * Starts the hardware by filling the bus info structure and media type, clears * all on chip counters, initializes receive address registers, multicast * table, VLAN filter table, calls routine to set up link and flow control * settings, and leaves transmit and receive units disabled and uninitialized **/ static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw) { /* Clear adapter stopped flag */ hw->adapter_stopped = false; return 0; } /** * ixgbevf_init_hw_vf - virtual function hardware initialization * @hw: pointer to hardware structure * * Initialize the hardware by resetting the hardware and then starting * the hardware **/ static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw) { s32 status = hw->mac.ops.start_hw(hw); hw->mac.ops.get_mac_addr(hw, hw->mac.addr); return status; } /** * ixgbevf_reset_hw_vf - Performs hardware reset * @hw: pointer to hardware structure * * Resets the hardware by resetting the transmit and receive units, masks and * clears all interrupts. **/ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) { struct ixgbe_mbx_info *mbx = &hw->mbx; u32 timeout = IXGBE_VF_INIT_TIMEOUT; u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN]; u8 *addr = (u8 *)(&msgbuf[1]); s32 ret_val; /* Call adapter stop to disable tx/rx and clear interrupts */ hw->mac.ops.stop_adapter(hw); /* reset the api version */ hw->api_version = ixgbe_mbox_api_10; hw->mbx.ops.init_params(hw); memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy, sizeof(struct ixgbe_mbx_operations)); IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST); IXGBE_WRITE_FLUSH(hw); /* we cannot reset while the RSTI / RSTD bits are asserted */ while (!mbx->ops.check_for_rst(hw) && timeout) { timeout--; udelay(5); } if (!timeout) return IXGBE_ERR_RESET_FAILED; /* mailbox timeout can now become active */ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; msgbuf[0] = IXGBE_VF_RESET; ixgbevf_write_mbx(hw, msgbuf, 1); mdelay(10); /* set our "perm_addr" based on info provided by PF * also set up the mc_filter_type which is piggy backed * on the mac address in word 3 */ ret_val = ixgbevf_poll_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN); if (ret_val) return ret_val; /* New versions of the PF may NACK the reset return message * to indicate that no MAC address has yet been assigned for * the VF. */ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) && msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE)) return IXGBE_ERR_INVALID_MAC_ADDR; if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS)) ether_addr_copy(hw->mac.perm_addr, addr); hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; return 0; } /** * ixgbevf_hv_reset_hw_vf - reset via Hyper-V * @hw: pointer to private hardware struct * * Hyper-V variant; the VF/PF communication is through the PCI * config space. 
*/ static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw) { #if IS_ENABLED(CONFIG_PCI_MMCONFIG) struct ixgbevf_adapter *adapter = hw->back; int i; for (i = 0; i < 6; i++) pci_read_config_byte(adapter->pdev, (i + IXGBE_HV_RESET_OFFSET), &hw->mac.perm_addr[i]); return 0; #else pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n"); return -EOPNOTSUPP; #endif } /** * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units * @hw: pointer to hardware structure * * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, * disables transmit and receive units. The adapter_stopped flag is used by * the shared code and drivers to determine if the adapter is in a stopped * state and should not touch the hardware. **/ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw) { u32 number_of_queues; u32 reg_val; u16 i; /* Set the adapter_stopped flag so other driver functions stop touching * the hardware */ hw->adapter_stopped = true; /* Disable the receive unit by stopped each queue */ number_of_queues = hw->mac.max_rx_queues; for (i = 0; i < number_of_queues; i++) { reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); if (reg_val & IXGBE_RXDCTL_ENABLE) { reg_val &= ~IXGBE_RXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val); } } IXGBE_WRITE_FLUSH(hw); /* Clear interrupt mask to stop from interrupts being generated */ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); /* Clear any pending interrupts */ IXGBE_READ_REG(hw, IXGBE_VTEICR); /* Disable the transmit unit. Each queue must be disabled. */ number_of_queues = hw->mac.max_tx_queues; for (i = 0; i < number_of_queues; i++) { reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); if (reg_val & IXGBE_TXDCTL_ENABLE) { reg_val &= ~IXGBE_TXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val); } } return 0; } /** * ixgbevf_mta_vector - Determines bit-vector in multicast table to set * @hw: pointer to hardware structure * @mc_addr: the multicast address * * Extracts the 12 bits, from a multicast address, to determine which * bit-vector to set in the multicast table. The hardware uses 12 bits, from * incoming Rx multicast addresses, to determine the bit-vector to check in * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set * by the MO field of the MCSTCTRL. The MO field is set during initialization * to mc_filter_type. 
**/ static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) { u32 vector = 0; switch (hw->mac.mc_filter_type) { case 0: /* use bits [47:36] of the address */ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); break; case 1: /* use bits [46:35] of the address */ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); break; case 2: /* use bits [45:34] of the address */ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); break; case 3: /* use bits [43:32] of the address */ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); break; default: /* Invalid mc_filter_type */ break; } /* vector can only be 12-bits or boundary will be exceeded */ vector &= 0xFFF; return vector; } /** * ixgbevf_get_mac_addr_vf - Read device MAC address * @hw: pointer to the HW structure * @mac_addr: pointer to storage for retrieved MAC address **/ static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) { ether_addr_copy(mac_addr, hw->mac.perm_addr); return 0; } static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) { u32 msgbuf[3], msgbuf_chk; u8 *msg_addr = (u8 *)(&msgbuf[1]); s32 ret_val; memset(msgbuf, 0, sizeof(msgbuf)); /* If index is one then this is the start of a new list and needs * indication to the PF so it can do it's own list management. * If it is zero then that tells the PF to just clear all of * this VF's macvlans and there is no new list. */ msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT; msgbuf[0] |= IXGBE_VF_SET_MACVLAN; msgbuf_chk = msgbuf[0]; if (addr) ether_addr_copy(msg_addr, addr); ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, ARRAY_SIZE(msgbuf)); if (!ret_val) { msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE)) return -ENOMEM; } return ret_val; } static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) { return -EOPNOTSUPP; } /** * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents. * @hw: pointer to hardware structure * @reta: buffer to fill with RETA contents. * @num_rx_queues: Number of Rx queues configured for this port * * The "reta" buffer should be big enough to contain 32 registers. * * Returns: 0 on success. * if API doesn't support this operation - (-EOPNOTSUPP). */ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) { int err, i, j; u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; u32 *hw_reta = &msgbuf[1]; u32 mask = 0; /* We have to use a mailbox for 82599 and x540 devices only. * For these devices RETA has 128 entries. * Also these VFs support up to 4 RSS queues. Therefore PF will compress * 16 RETA entries in each DWORD giving 2 bits to each entry. */ int dwords = IXGBEVF_82599_RETA_SIZE / 16; /* We support the RSS querying for 82599 and x540 devices only. * Thus return an error if API doesn't support RETA querying or querying * is not supported for this device type. */ switch (hw->api_version) { case ixgbe_mbox_api_15: case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: if (hw->mac.type < ixgbe_mac_X550_vf) break; fallthrough; default: return -EOPNOTSUPP; } msgbuf[0] = IXGBE_VF_GET_RETA; err = ixgbevf_write_mbx(hw, msgbuf, 1); if (err) return err; err = ixgbevf_poll_mbx(hw, msgbuf, dwords + 1); if (err) return err; msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; /* If the operation has been refused by a PF return -EPERM */ if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_FAILURE)) return -EPERM; /* If we didn't get an ACK there must have been * some sort of mailbox error so we should treat it * as such. 
*/ if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_SUCCESS)) return IXGBE_ERR_MBX; /* ixgbevf doesn't support more than 2 queues at the moment */ if (num_rx_queues > 1) mask = 0x1; for (i = 0; i < dwords; i++) for (j = 0; j < 16; j++) reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask; return 0; } /** * ixgbevf_get_rss_key_locked - get the RSS Random Key * @hw: pointer to the HW structure * @rss_key: buffer to fill with RSS Hash Key contents. * * The "rss_key" buffer should be big enough to contain 10 registers. * * Returns: 0 on success. * if API doesn't support this operation - (-EOPNOTSUPP). */ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) { int err; u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; /* We currently support the RSS Random Key retrieval for 82599 and x540 * devices only. * * Thus return an error if API doesn't support RSS Random Key retrieval * or if the operation is not supported for this device type. */ switch (hw->api_version) { case ixgbe_mbox_api_15: case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: if (hw->mac.type < ixgbe_mac_X550_vf) break; fallthrough; default: return -EOPNOTSUPP; } msgbuf[0] = IXGBE_VF_GET_RSS_KEY; err = ixgbevf_write_mbx(hw, msgbuf, 1); if (err) return err; err = ixgbevf_poll_mbx(hw, msgbuf, 11); if (err) return err; msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; /* If the operation has been refused by a PF return -EPERM */ if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_FAILURE)) return -EPERM; /* If we didn't get an ACK there must have been * some sort of mailbox error so we should treat it * as such. */ if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_SUCCESS)) return IXGBE_ERR_MBX; memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE); return 0; } /** * ixgbevf_set_rar_vf - set device MAC address * @hw: pointer to hardware structure * @index: Receive address register to write * @addr: Address to put into receive address register * @vmdq: Unused in this implementation **/ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq) { u32 msgbuf[3]; u8 *msg_addr = (u8 *)(&msgbuf[1]); s32 ret_val; memset(msgbuf, 0, sizeof(msgbuf)); msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; ether_addr_copy(msg_addr, addr); ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, ARRAY_SIZE(msgbuf)); msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; /* if nacked the address was rejected, use "perm_addr" */ if (!ret_val && (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) { ixgbevf_get_mac_addr_vf(hw, hw->mac.addr); return IXGBE_ERR_MBX; } return ret_val; } /** * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant * @hw: pointer to hardware structure * @index: Receive address register to write * @addr: Address to put into receive address register * @vmdq: Unused in this implementation * * We don't really allow setting the device MAC address. However, * if the address being set is the permanent MAC address we will * permit that. **/ static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq) { if (ether_addr_equal(addr, hw->mac.perm_addr)) return 0; return -EOPNOTSUPP; } /** * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses * @hw: pointer to the HW structure * @netdev: pointer to net device structure * * Updates the Multicast Table Array. 
**/ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, struct net_device *netdev) { struct netdev_hw_addr *ha; u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; u16 *vector_list = (u16 *)&msgbuf[1]; u32 cnt, i; /* Each entry in the list uses 1 16 bit word. We have 30 * 16 bit words available in our HW msg buffer (minus 1 for the * msg type). That's 30 hash values if we pack 'em right. If * there are more than 30 MC addresses to add then punt the * extras for now and then add code to handle more than 30 later. * It would be unusual for a server to request that many multi-cast * addresses except for in large enterprise network environments. */ cnt = netdev_mc_count(netdev); if (cnt > 30) cnt = 30; msgbuf[0] = IXGBE_VF_SET_MULTICAST; msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT; i = 0; netdev_for_each_mc_addr(ha, netdev) { if (i == cnt) break; if (is_link_local_ether_addr(ha->addr)) continue; vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr); } return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE); } /** * ixgbevf_hv_update_mc_addr_list_vf - stub * @hw: unused * @netdev: unused * * Hyper-V variant - just a stub. */ static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, struct net_device *netdev) { return -EOPNOTSUPP; } /** * ixgbevf_update_xcast_mode - Update Multicast mode * @hw: pointer to the HW structure * @xcast_mode: new multicast mode * * Updates the Multicast Mode of VF. **/ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) { u32 msgbuf[2]; s32 err; switch (hw->api_version) { case ixgbe_mbox_api_12: /* promisc introduced in 1.3 version */ if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) return -EOPNOTSUPP; fallthrough; case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: case ixgbe_mbox_api_15: break; default: return -EOPNOTSUPP; } msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE; msgbuf[1] = xcast_mode; err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, ARRAY_SIZE(msgbuf)); if (err) return err; msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE)) return -EPERM; return 0; } /** * ixgbevf_hv_update_xcast_mode - stub * @hw: unused * @xcast_mode: unused * * Hyper-V variant - just a stub. */ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) { return -EOPNOTSUPP; } /** * ixgbevf_get_link_state_vf - Get VF link state from PF * @hw: pointer to the HW structure * @link_state: link state storage * * Returns state of the operation error or success. */ static s32 ixgbevf_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state) { u32 msgbuf[2]; s32 ret_val; s32 err; msgbuf[0] = IXGBE_VF_GET_LINK_STATE; msgbuf[1] = 0x0; err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) { ret_val = IXGBE_ERR_MBX; } else { ret_val = 0; *link_state = msgbuf[1]; } return ret_val; } /** * ixgbevf_hv_get_link_state_vf - * Hyper-V variant - just a stub. * @hw: unused * @link_state: unused * * Hyper-V variant; there is no mailbox communication. 
*/ static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state) { return -EOPNOTSUPP; } /** * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address * @hw: pointer to the HW structure * @vlan: 12 bit VLAN ID * @vind: unused by VF drivers * @vlan_on: if true then set bit, else clear bit **/ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) { u32 msgbuf[2]; s32 err; msgbuf[0] = IXGBE_VF_SET_VLAN; msgbuf[1] = vlan; /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, ARRAY_SIZE(msgbuf)); if (err) goto mbx_err; /* remove extra bits from the message */ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT); if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_SUCCESS)) err = IXGBE_ERR_INVALID_ARGUMENT; mbx_err: return err; } /** * ixgbevf_hv_set_vfta_vf - * Hyper-V variant - just a stub. * @hw: unused * @vlan: unused * @vind: unused * @vlan_on: unused */ static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) { return -EOPNOTSUPP; } /** * ixgbevf_setup_mac_link_vf - Setup MAC link settings * @hw: pointer to hardware structure * @speed: Unused in this implementation * @autoneg: Unused in this implementation * @autoneg_wait_to_complete: Unused in this implementation * * Do nothing and return success. VF drivers are not allowed to change * global settings. Maintained for driver compatibility. **/ static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete) { return 0; } /** * ixgbevf_check_mac_link_vf - Get link/speed status * @hw: pointer to hardware structure * @speed: pointer to link speed * @link_up: true is link is up, false otherwise * @autoneg_wait_to_complete: unused * * Reads the links register to determine if link is up and the current speed **/ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool autoneg_wait_to_complete) { struct ixgbe_mbx_info *mbx = &hw->mbx; struct ixgbe_mac_info *mac = &hw->mac; s32 ret_val = 0; u32 links_reg; u32 in_msg = 0; /* If we were hit with a reset drop the link */ if (!mbx->ops.check_for_rst(hw) || !mbx->timeout) mac->get_link_status = true; if (!mac->get_link_status) goto out; /* if link status is down no point in checking to see if pf is up */ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); if (!(links_reg & IXGBE_LINKS_UP)) goto out; /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs * before the link status is correct */ if (mac->type == ixgbe_mac_82599_vf) { int i; for (i = 0; i < 5; i++) { udelay(100); links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); if (!(links_reg & IXGBE_LINKS_UP)) goto out; } } switch (links_reg & IXGBE_LINKS_SPEED_82599) { case IXGBE_LINKS_SPEED_10G_82599: *speed = IXGBE_LINK_SPEED_10GB_FULL; break; case IXGBE_LINKS_SPEED_1G_82599: *speed = IXGBE_LINK_SPEED_1GB_FULL; break; case IXGBE_LINKS_SPEED_100_82599: *speed = IXGBE_LINK_SPEED_100_FULL; break; } /* if the read failed it could just be a mailbox collision, best wait * until we are called again and don't report an error */ if (mbx->ops.read(hw, &in_msg, 1)) { if (hw->api_version >= ixgbe_mbox_api_15) mac->get_link_status = false; goto out; } if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { /* msg is not CTS and is NACK we must have lost CTS status */ if (in_msg & IXGBE_VT_MSGTYPE_FAILURE) ret_val = -1; goto out; } /* the pf is 
talking, if we timed out in the past we reinit */ if (!mbx->timeout) { ret_val = -1; goto out; } /* if we passed all the tests above then the link is up and we no * longer need to check for link */ mac->get_link_status = false; out: *link_up = !mac->get_link_status; return ret_val; } /** * ixgbevf_hv_check_mac_link_vf - check link * @hw: pointer to private hardware struct * @speed: pointer to link speed * @link_up: true is link is up, false otherwise * @autoneg_wait_to_complete: unused * * Hyper-V variant; there is no mailbox communication. */ static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool autoneg_wait_to_complete) { struct ixgbe_mbx_info *mbx = &hw->mbx; struct ixgbe_mac_info *mac = &hw->mac; u32 links_reg; /* If we were hit with a reset drop the link */ if (!mbx->ops.check_for_rst(hw) || !mbx->timeout) mac->get_link_status = true; if (!mac->get_link_status) goto out; /* if link status is down no point in checking to see if pf is up */ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); if (!(links_reg & IXGBE_LINKS_UP)) goto out; /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs * before the link status is correct */ if (mac->type == ixgbe_mac_82599_vf) { int i; for (i = 0; i < 5; i++) { udelay(100); links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); if (!(links_reg & IXGBE_LINKS_UP)) goto out; } } switch (links_reg & IXGBE_LINKS_SPEED_82599) { case IXGBE_LINKS_SPEED_10G_82599: *speed = IXGBE_LINK_SPEED_10GB_FULL; break; case IXGBE_LINKS_SPEED_1G_82599: *speed = IXGBE_LINK_SPEED_1GB_FULL; break; case IXGBE_LINKS_SPEED_100_82599: *speed = IXGBE_LINK_SPEED_100_FULL; break; } /* if we passed all the tests above then the link is up and we no * longer need to check for link */ mac->get_link_status = false; out: *link_up = !mac->get_link_status; return 0; } /** * ixgbevf_set_rlpml_vf - Set the maximum receive packet length * @hw: pointer to the HW structure * @max_size: value to assign to max frame size **/ static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) { u32 msgbuf[2]; s32 ret_val; msgbuf[0] = IXGBE_VF_SET_LPE; msgbuf[1] = max_size; ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, ARRAY_SIZE(msgbuf)); if (ret_val) return ret_val; if ((msgbuf[0] & IXGBE_VF_SET_LPE) && (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) return IXGBE_ERR_MBX; return 0; } /** * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length * @hw: pointer to the HW structure * @max_size: value to assign to max frame size * Hyper-V variant. **/ static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) { u32 reg; /* If we are on Hyper-V, we implement this functionality * differently. 
*/ reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0)); /* CRC == 4 */ reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN); IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg); return 0; } /** * ixgbevf_negotiate_api_version_vf - Negotiate supported API version * @hw: pointer to the HW structure * @api: integer containing requested API version **/ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) { int err; u32 msg[3]; /* Negotiate the mailbox API version */ msg[0] = IXGBE_VF_API_NEGOTIATE; msg[1] = api; msg[2] = 0; err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg)); if (!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; /* Store value and return 0 on success */ if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_SUCCESS)) { hw->api_version = api; return 0; } err = IXGBE_ERR_INVALID_ARGUMENT; } return err; } /** * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version * @hw: pointer to the HW structure * @api: integer containing requested API version * Hyper-V version - only ixgbe_mbox_api_10 supported. **/ static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) { /* Hyper-V only supports api version ixgbe_mbox_api_10 */ if (api != ixgbe_mbox_api_10) return IXGBE_ERR_INVALID_ARGUMENT; return 0; } int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, unsigned int *default_tc) { int err; u32 msg[5]; /* do nothing if API doesn't support ixgbevf_get_queues */ switch (hw->api_version) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: case ixgbe_mbox_api_15: break; default: return 0; } /* Fetch queue configuration from the PF */ msg[0] = IXGBE_VF_GET_QUEUE; msg[1] = msg[2] = msg[3] = msg[4] = 0; err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg)); if (!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; /* if we didn't get an ACK there must have been * some sort of mailbox error so we should treat it * as such */ if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_SUCCESS)) return IXGBE_ERR_MBX; /* record and validate values from message */ hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES]; if (hw->mac.max_tx_queues == 0 || hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES) hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES; hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES]; if (hw->mac.max_rx_queues == 0 || hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES) hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES; *num_tcs = msg[IXGBE_VF_TRANS_VLAN]; /* in case of unknown state assume we cannot tag frames */ if (*num_tcs > hw->mac.max_rx_queues) *num_tcs = 1; *default_tc = msg[IXGBE_VF_DEF_QUEUE]; /* default to queue 0 on out-of-bounds queue number */ if (*default_tc >= hw->mac.max_tx_queues) *default_tc = 0; } return err; } static const struct ixgbe_mac_operations ixgbevf_mac_ops = { .init_hw = ixgbevf_init_hw_vf, .reset_hw = ixgbevf_reset_hw_vf, .start_hw = ixgbevf_start_hw_vf, .get_mac_addr = ixgbevf_get_mac_addr_vf, .stop_adapter = ixgbevf_stop_hw_vf, .setup_link = ixgbevf_setup_mac_link_vf, .check_link = ixgbevf_check_mac_link_vf, .negotiate_api_version = ixgbevf_negotiate_api_version_vf, .set_rar = ixgbevf_set_rar_vf, .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, .update_xcast_mode = ixgbevf_update_xcast_mode, .get_link_state = ixgbevf_get_link_state_vf, .set_uc_addr = ixgbevf_set_uc_addr_vf, .set_vfta = ixgbevf_set_vfta_vf, .set_rlpml = ixgbevf_set_rlpml_vf, }; static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = { .init_hw = ixgbevf_init_hw_vf, .reset_hw = ixgbevf_hv_reset_hw_vf, 
.start_hw = ixgbevf_start_hw_vf, .get_mac_addr = ixgbevf_get_mac_addr_vf, .stop_adapter = ixgbevf_stop_hw_vf, .setup_link = ixgbevf_setup_mac_link_vf, .check_link = ixgbevf_hv_check_mac_link_vf, .negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf, .set_rar = ixgbevf_hv_set_rar_vf, .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf, .update_xcast_mode = ixgbevf_hv_update_xcast_mode, .get_link_state = ixgbevf_hv_get_link_state_vf, .set_uc_addr = ixgbevf_hv_set_uc_addr_vf, .set_vfta = ixgbevf_hv_set_vfta_vf, .set_rlpml = ixgbevf_hv_set_rlpml_vf, }; const struct ixgbevf_info ixgbevf_82599_vf_info = { .mac = ixgbe_mac_82599_vf, .mac_ops = &ixgbevf_mac_ops, }; const struct ixgbevf_info ixgbevf_82599_vf_hv_info = { .mac = ixgbe_mac_82599_vf, .mac_ops = &ixgbevf_hv_mac_ops, }; const struct ixgbevf_info ixgbevf_X540_vf_info = { .mac = ixgbe_mac_X540_vf, .mac_ops = &ixgbevf_mac_ops, }; const struct ixgbevf_info ixgbevf_X540_vf_hv_info = { .mac = ixgbe_mac_X540_vf, .mac_ops = &ixgbevf_hv_mac_ops, }; const struct ixgbevf_info ixgbevf_X550_vf_info = { .mac = ixgbe_mac_X550_vf, .mac_ops = &ixgbevf_mac_ops, }; const struct ixgbevf_info ixgbevf_X550_vf_hv_info = { .mac = ixgbe_mac_X550_vf, .mac_ops = &ixgbevf_hv_mac_ops, }; const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = { .mac = ixgbe_mac_X550EM_x_vf, .mac_ops = &ixgbevf_mac_ops, }; const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = { .mac = ixgbe_mac_X550EM_x_vf, .mac_ops = &ixgbevf_hv_mac_ops, }; const struct ixgbevf_info ixgbevf_x550em_a_vf_info = { .mac = ixgbe_mac_x550em_a_vf, .mac_ops = &ixgbevf_mac_ops, };
linux-master
drivers/net/ethernet/intel/ixgbevf/vf.c
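Editor's note (illustrative sketch, not part of the upstream file above): several helpers in vf.c, such as ixgbevf_negotiate_api_version_vf and ixgbevf_get_queues, check a PF reply the same way: strip the CTS bit from word 0 of the reply and accept it only if what remains equals the original opcode OR'd with the success code. The standalone C program below reproduces that pattern; the constant values are made-up stand-ins, not the driver's real IXGBE_* definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in message codes; the real IXGBE_* values live in the driver headers. */
#define VF_API_NEGOTIATE   0x08u
#define MSGTYPE_SUCCESS    0x80000000u
#define MSGTYPE_FAILURE    0x40000000u
#define MSGTYPE_CTS        0x20000000u

/* Accept a PF reply only when, with CTS stripped, it is exactly
 * "<original opcode> | SUCCESS"; anything else is treated as a mailbox error.
 */
static bool reply_ok(uint32_t opcode, uint32_t reply_word0)
{
    reply_word0 &= ~MSGTYPE_CTS;
    return reply_word0 == (opcode | MSGTYPE_SUCCESS);
}

int main(void)
{
    uint32_t good = VF_API_NEGOTIATE | MSGTYPE_SUCCESS | MSGTYPE_CTS;
    uint32_t bad  = VF_API_NEGOTIATE | MSGTYPE_FAILURE;

    printf("good reply accepted: %d\n", reply_ok(VF_API_NEGOTIATE, good)); /* 1 */
    printf("bad reply accepted:  %d\n", reply_ok(VF_API_NEGOTIATE, bad));  /* 0 */
    return 0;
}

The point of masking CTS first is that the PF may set it on any reply; only the opcode plus the explicit success bit identifies a usable answer.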
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "mbx.h" #include "ixgbevf.h" /** * ixgbevf_poll_for_msg - Wait for message notification * @hw: pointer to the HW structure * * returns 0 if it successfully received a message notification **/ static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw) { struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; if (!countdown || !mbx->ops.check_for_msg) return IXGBE_ERR_CONFIG; while (countdown && mbx->ops.check_for_msg(hw)) { countdown--; udelay(mbx->udelay); } return countdown ? 0 : IXGBE_ERR_TIMEOUT; } /** * ixgbevf_poll_for_ack - Wait for message acknowledgment * @hw: pointer to the HW structure * * returns 0 if it successfully received a message acknowledgment **/ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw) { struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; if (!countdown || !mbx->ops.check_for_ack) return IXGBE_ERR_CONFIG; while (countdown && mbx->ops.check_for_ack(hw)) { countdown--; udelay(mbx->udelay); } return countdown ? 0 : IXGBE_ERR_TIMEOUT; } /** * ixgbevf_read_mailbox_vf - read VF's mailbox register * @hw: pointer to the HW structure * * This function is used to read the mailbox register dedicated for VF without * losing the read to clear status bits. **/ static u32 ixgbevf_read_mailbox_vf(struct ixgbe_hw *hw) { u32 vf_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); vf_mailbox |= hw->mbx.vf_mailbox; hw->mbx.vf_mailbox |= vf_mailbox & IXGBE_VFMAILBOX_R2C_BITS; return vf_mailbox; } /** * ixgbevf_clear_msg_vf - clear PF status bit * @hw: pointer to the HW structure * * This function is used to clear PFSTS bit in the VFMAILBOX register **/ static void ixgbevf_clear_msg_vf(struct ixgbe_hw *hw) { u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw); if (vf_mailbox & IXGBE_VFMAILBOX_PFSTS) { hw->mbx.stats.reqs++; hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFSTS; } } /** * ixgbevf_clear_ack_vf - clear PF ACK bit * @hw: pointer to the HW structure * * This function is used to clear PFACK bit in the VFMAILBOX register **/ static void ixgbevf_clear_ack_vf(struct ixgbe_hw *hw) { u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw); if (vf_mailbox & IXGBE_VFMAILBOX_PFACK) { hw->mbx.stats.acks++; hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFACK; } } /** * ixgbevf_clear_rst_vf - clear PF reset bit * @hw: pointer to the HW structure * * This function is used to clear reset indication and reset done bit in * VFMAILBOX register after reset the shared resources and the reset sequence. **/ static void ixgbevf_clear_rst_vf(struct ixgbe_hw *hw) { u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw); if (vf_mailbox & (IXGBE_VFMAILBOX_RSTI | IXGBE_VFMAILBOX_RSTD)) { hw->mbx.stats.rsts++; hw->mbx.vf_mailbox &= ~(IXGBE_VFMAILBOX_RSTI | IXGBE_VFMAILBOX_RSTD); } } /** * ixgbevf_check_for_bit_vf - Determine if a status bit was set * @hw: pointer to the HW structure * @mask: bitmask for bits to be tested and cleared * * This function is used to check for the read to clear bits within * the V2P mailbox. 
**/ static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask) { u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw); s32 ret_val = IXGBE_ERR_MBX; if (vf_mailbox & mask) ret_val = 0; return ret_val; } /** * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail * @hw: pointer to the HW structure * * returns 0 if the PF has set the Status bit or else ERR_MBX **/ static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_ERR_MBX; if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) { ret_val = 0; hw->mbx.stats.reqs++; } return ret_val; } /** * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd * @hw: pointer to the HW structure * * returns 0 if the PF has set the ACK bit or else ERR_MBX **/ static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_ERR_MBX; if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) { ret_val = 0; ixgbevf_clear_ack_vf(hw); hw->mbx.stats.acks++; } return ret_val; } /** * ixgbevf_check_for_rst_vf - checks to see if the PF has reset * @hw: pointer to the HW structure * * returns true if the PF has set the reset done bit or else false **/ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_ERR_MBX; if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | IXGBE_VFMAILBOX_RSTI))) { ret_val = 0; ixgbevf_clear_rst_vf(hw); hw->mbx.stats.rsts++; } return ret_val; } /** * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock * @hw: pointer to the HW structure * * return 0 if we obtained the mailbox lock **/ static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw) { struct ixgbe_mbx_info *mbx = &hw->mbx; s32 ret_val = IXGBE_ERR_CONFIG; int countdown = mbx->timeout; u32 vf_mailbox; if (!mbx->timeout) return ret_val; while (countdown--) { /* Reserve mailbox for VF use */ vf_mailbox = ixgbevf_read_mailbox_vf(hw); vf_mailbox |= IXGBE_VFMAILBOX_VFU; IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox); /* Verify that VF is the owner of the lock */ if (ixgbevf_read_mailbox_vf(hw) & IXGBE_VFMAILBOX_VFU) { ret_val = 0; break; } /* Wait a bit before trying again */ udelay(mbx->udelay); } if (ret_val) ret_val = IXGBE_ERR_TIMEOUT; return ret_val; } /** * ixgbevf_release_mbx_lock_vf - release mailbox lock * @hw: pointer to the HW structure **/ static void ixgbevf_release_mbx_lock_vf(struct ixgbe_hw *hw) { u32 vf_mailbox; /* Return ownership of the buffer */ vf_mailbox = ixgbevf_read_mailbox_vf(hw); vf_mailbox &= ~IXGBE_VFMAILBOX_VFU; IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox); } /** * ixgbevf_release_mbx_lock_vf_legacy - release mailbox lock * @hw: pointer to the HW structure **/ static void ixgbevf_release_mbx_lock_vf_legacy(struct ixgbe_hw *__always_unused hw) { } /** * ixgbevf_write_mbx_vf - Write a message to the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * * returns 0 if it successfully copied message into the buffer **/ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) { u32 vf_mailbox; s32 ret_val; u16 i; /* lock the mailbox to prevent PF/VF race condition */ ret_val = ixgbevf_obtain_mbx_lock_vf(hw); if (ret_val) goto out_no_write; /* flush msg and acks as we are overwriting the message buffer */ ixgbevf_clear_msg_vf(hw); ixgbevf_clear_ack_vf(hw); /* copy the caller specified message to the mailbox memory buffer */ for (i = 0; i < size; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); /* update stats */ hw->mbx.stats.msgs_tx++; /* interrupt the PF to tell it a message has been sent */ vf_mailbox = 
ixgbevf_read_mailbox_vf(hw); vf_mailbox |= IXGBE_VFMAILBOX_REQ; IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox); /* if msg sent wait until we receive an ack */ ret_val = ixgbevf_poll_for_ack(hw); out_no_write: hw->mbx.ops.release(hw); return ret_val; } /** * ixgbevf_write_mbx_vf_legacy - Write a message to the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * * returns 0 if it successfully copied message into the buffer **/ static s32 ixgbevf_write_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size) { s32 ret_val; u16 i; /* lock the mailbox to prevent PF/VF race condition */ ret_val = ixgbevf_obtain_mbx_lock_vf(hw); if (ret_val) goto out_no_write; /* flush msg and acks as we are overwriting the message buffer */ ixgbevf_check_for_msg_vf(hw); ixgbevf_clear_msg_vf(hw); ixgbevf_check_for_ack_vf(hw); ixgbevf_clear_ack_vf(hw); /* copy the caller specified message to the mailbox memory buffer */ for (i = 0; i < size; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); /* update stats */ hw->mbx.stats.msgs_tx++; /* Drop VFU and interrupt the PF to tell it a message has been sent */ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ); out_no_write: return ret_val; } /** * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for VF * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * * returns 0 if it successfully read message from buffer **/ static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) { u32 vf_mailbox; s32 ret_val; u16 i; /* check if there is a message from PF */ ret_val = ixgbevf_check_for_msg_vf(hw); if (ret_val) return ret_val; ixgbevf_clear_msg_vf(hw); /* copy the message from the mailbox memory buffer */ for (i = 0; i < size; i++) msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); /* Acknowledge receipt */ vf_mailbox = ixgbevf_read_mailbox_vf(hw); vf_mailbox |= IXGBE_VFMAILBOX_ACK; IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox); /* update stats */ hw->mbx.stats.msgs_rx++; return ret_val; } /** * ixgbevf_read_mbx_vf_legacy - Reads a message from the inbox intended for VF * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * * returns 0 if it successfully read message from buffer **/ static s32 ixgbevf_read_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size) { s32 ret_val = 0; u16 i; /* lock the mailbox to prevent PF/VF race condition */ ret_val = ixgbevf_obtain_mbx_lock_vf(hw); if (ret_val) goto out_no_read; /* copy the message from the mailbox memory buffer */ for (i = 0; i < size; i++) msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); /* Acknowledge receipt and release mailbox, then we're done */ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK); /* update stats */ hw->mbx.stats.msgs_rx++; out_no_read: return ret_val; } /** * ixgbevf_init_mbx_params_vf - set initial values for VF mailbox * @hw: pointer to the HW structure * * Initializes the hw->mbx struct to correct values for VF mailbox */ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) { struct ixgbe_mbx_info *mbx = &hw->mbx; /* start mailbox as timed out and let the reset_hw call set the timeout * value to begin communications */ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; mbx->size = IXGBE_VFMAILBOX_SIZE; mbx->stats.msgs_tx = 0; mbx->stats.msgs_rx = 0; mbx->stats.reqs = 0; mbx->stats.acks = 0; mbx->stats.rsts = 0; return 0; } /** * ixgbevf_poll_mbx - Wait for message and read it from the mailbox * 
@hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * * returns 0 if it successfully read message from buffer **/ s32 ixgbevf_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) { struct ixgbe_mbx_info *mbx = &hw->mbx; s32 ret_val = IXGBE_ERR_CONFIG; if (!mbx->ops.read || !mbx->ops.check_for_msg || !mbx->timeout) return ret_val; /* limit read to size of mailbox */ if (size > mbx->size) size = mbx->size; ret_val = ixgbevf_poll_for_msg(hw); /* if ack received read message, otherwise we timed out */ if (!ret_val) ret_val = mbx->ops.read(hw, msg, size); return ret_val; } /** * ixgbevf_write_mbx - Write a message to the mailbox and wait for ACK * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * * returns 0 if it successfully copied message into the buffer and * received an ACK to that message within specified period **/ s32 ixgbevf_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) { struct ixgbe_mbx_info *mbx = &hw->mbx; s32 ret_val = IXGBE_ERR_CONFIG; /** * exit if either we can't write, release * or there is no timeout defined */ if (!mbx->ops.write || !mbx->ops.check_for_ack || !mbx->ops.release || !mbx->timeout) return ret_val; if (size > mbx->size) ret_val = IXGBE_ERR_PARAM; else ret_val = mbx->ops.write(hw, msg, size); return ret_val; } const struct ixgbe_mbx_operations ixgbevf_mbx_ops = { .init_params = ixgbevf_init_mbx_params_vf, .release = ixgbevf_release_mbx_lock_vf, .read = ixgbevf_read_mbx_vf, .write = ixgbevf_write_mbx_vf, .check_for_msg = ixgbevf_check_for_msg_vf, .check_for_ack = ixgbevf_check_for_ack_vf, .check_for_rst = ixgbevf_check_for_rst_vf, }; const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy = { .init_params = ixgbevf_init_mbx_params_vf, .release = ixgbevf_release_mbx_lock_vf_legacy, .read = ixgbevf_read_mbx_vf_legacy, .write = ixgbevf_write_mbx_vf_legacy, .check_for_msg = ixgbevf_check_for_msg_vf, .check_for_ack = ixgbevf_check_for_ack_vf, .check_for_rst = ixgbevf_check_for_rst_vf, }; /* Mailbox operations when running on Hyper-V. * On Hyper-V, PF/VF communication is not through the * hardware mailbox; this communication is through * a software mediated path. * Most mail box operations are noop while running on * Hyper-V. */ const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = { .init_params = ixgbevf_init_mbx_params_vf, .check_for_rst = ixgbevf_check_for_rst_vf, };
linux-master
drivers/net/ethernet/intel/ixgbevf/mbx.c
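Editor's note (illustrative sketch, not part of the upstream file above): ixgbevf_read_mailbox_vf in mbx.c caches the read-to-clear status bits in hw->mbx.vf_mailbox so that one read (say, checking for PFSTS) does not silently discard another bit (say, PFACK) that the hardware cleared on that same read. The standalone simulation below shows why the software copy is needed; all names and bit values are illustrative stand-ins.

#include <stdint.h>
#include <stdio.h>

#define PFSTS 0x1u   /* "PF sent a message" (read-to-clear) */
#define PFACK 0x2u   /* "PF acked our message" (read-to-clear) */
#define R2C_BITS (PFSTS | PFACK)

/* Fake hardware register: reading it returns the current value and clears
 * the read-to-clear bits, like the VFMAILBOX status bits.
 */
static uint32_t hw_reg = PFSTS | PFACK;

static uint32_t hw_read(void)
{
    uint32_t v = hw_reg;

    hw_reg &= ~R2C_BITS;
    return v;
}

/* Software copy that accumulates read-to-clear bits across reads, mirroring
 * the hw->mbx.vf_mailbox bookkeeping in ixgbevf_read_mailbox_vf().
 */
static uint32_t sw_copy;

static uint32_t mailbox_read(void)
{
    uint32_t v = hw_read() | sw_copy;

    sw_copy |= v & R2C_BITS;
    return v;
}

int main(void)
{
    /* First read consumes the hardware bits... */
    printf("first read:  0x%x\n", mailbox_read());
    /* ...but the cached copy keeps them visible until software clears them. */
    printf("second read: 0x%x\n", mailbox_read());

    sw_copy &= ~PFACK;  /* e.g. the ACK handler consumed PFACK */
    printf("after clearing PFACK: 0x%x\n", mailbox_read());
    return 0;
}

Without sw_copy, the second read would return 0 and a pending ACK or message notification would be lost between the check_for_msg and check_for_ack paths.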
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/netdevice.h> #include <linux/module.h> #include <linux/pci.h> #include "e1000.h" /* This is the only thing that needs to be changed to adjust the * maximum number of ports that the driver can manage. */ #define E1000_MAX_NIC 32 #define OPTION_UNSET -1 #define OPTION_DISABLED 0 #define OPTION_ENABLED 1 #define COPYBREAK_DEFAULT 256 unsigned int copybreak = COPYBREAK_DEFAULT; module_param(copybreak, uint, 0644); MODULE_PARM_DESC(copybreak, "Maximum size of packet that is copied to a new buffer on receive"); /* All parameters are treated the same, as an integer array of values. * This macro just reduces the need to repeat the same declaration code * over and over (plus this helps to avoid typo bugs). */ #define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } #define E1000_PARAM(X, desc) \ static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ static unsigned int num_##X; \ module_param_array_named(X, X, int, &num_##X, 0); \ MODULE_PARM_DESC(X, desc); /* Transmit Interrupt Delay in units of 1.024 microseconds * Tx interrupt delay needs to typically be set to something non-zero * * Valid Range: 0-65535 */ E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); #define DEFAULT_TIDV 8 #define MAX_TXDELAY 0xFFFF #define MIN_TXDELAY 0 /* Transmit Absolute Interrupt Delay in units of 1.024 microseconds * * Valid Range: 0-65535 */ E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); #define DEFAULT_TADV 32 #define MAX_TXABSDELAY 0xFFFF #define MIN_TXABSDELAY 0 /* Receive Interrupt Delay in units of 1.024 microseconds * hardware will likely hang if you set this to anything but zero. * * Burst variant is used as default if device has FLAG2_DMA_BURST. * * Valid Range: 0-65535 */ E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); #define DEFAULT_RDTR 0 #define BURST_RDTR 0x20 #define MAX_RXDELAY 0xFFFF #define MIN_RXDELAY 0 /* Receive Absolute Interrupt Delay in units of 1.024 microseconds * * Burst variant is used as default if device has FLAG2_DMA_BURST. * * Valid Range: 0-65535 */ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); #define DEFAULT_RADV 8 #define BURST_RADV 0x20 #define MAX_RXABSDELAY 0xFFFF #define MIN_RXABSDELAY 0 /* Interrupt Throttle Rate (interrupts/sec) * * Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative */ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); #define DEFAULT_ITR 3 #define MAX_ITR 100000 #define MIN_ITR 100 /* IntMode (Interrupt Mode) * * Valid Range: varies depending on kernel configuration & hardware support * * legacy=0, MSI=1, MSI-X=2 * * When MSI/MSI-X support is enabled in kernel- * Default Value: 2 (MSI-X) when supported by hardware, 1 (MSI) otherwise * When MSI/MSI-X support is not enabled in kernel- * Default Value: 0 (legacy) * * When a mode is specified that is not allowed/supported, it will be * demoted to the most advanced interrupt mode available. 
*/ E1000_PARAM(IntMode, "Interrupt Mode"); /* Enable Smart Power Down of the PHY * * Valid Range: 0, 1 * * Default Value: 0 (disabled) */ E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); /* Enable Kumeran Lock Loss workaround * * Valid Range: 0, 1 * * Default Value: 1 (enabled) */ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); /* Write Protect NVM * * Valid Range: 0, 1 * * Default Value: 1 (enabled) */ E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); /* Enable CRC Stripping * * Valid Range: 0, 1 * * Default Value: 1 (enabled) */ E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs the CRC"); struct e1000_option { enum { enable_option, range_option, list_option } type; const char *name; const char *err; int def; union { /* range_option info */ struct { int min; int max; } r; /* list_option info */ struct { int nr; struct e1000_opt_list { int i; char *str; } *p; } l; } arg; }; static int e1000_validate_option(unsigned int *value, const struct e1000_option *opt, struct e1000_adapter *adapter) { if (*value == OPTION_UNSET) { *value = opt->def; return 0; } switch (opt->type) { case enable_option: switch (*value) { case OPTION_ENABLED: dev_info(&adapter->pdev->dev, "%s Enabled\n", opt->name); return 0; case OPTION_DISABLED: dev_info(&adapter->pdev->dev, "%s Disabled\n", opt->name); return 0; } break; case range_option: if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { dev_info(&adapter->pdev->dev, "%s set to %i\n", opt->name, *value); return 0; } break; case list_option: { int i; struct e1000_opt_list *ent; for (i = 0; i < opt->arg.l.nr; i++) { ent = &opt->arg.l.p[i]; if (*value == ent->i) { if (ent->str[0] != '\0') dev_info(&adapter->pdev->dev, "%s\n", ent->str); return 0; } } } break; default: BUG(); } dev_info(&adapter->pdev->dev, "Invalid %s value specified (%i) %s\n", opt->name, *value, opt->err); *value = opt->def; return -1; } /** * e1000e_check_options - Range Checking for Command Line Parameters * @adapter: board private structure * * This routine checks all command line parameters for valid user * input. If an invalid value is given, or if no user specified * value exists, a default value is used. The final value is stored * in a variable in the adapter structure. 
**/ void e1000e_check_options(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; int bd = adapter->bd_number; if (bd >= E1000_MAX_NIC) { dev_notice(&adapter->pdev->dev, "Warning: no configuration for board #%i\n", bd); dev_notice(&adapter->pdev->dev, "Using defaults for all values\n"); } /* Transmit Interrupt Delay */ { static const struct e1000_option opt = { .type = range_option, .name = "Transmit Interrupt Delay", .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), .def = DEFAULT_TIDV, .arg = { .r = { .min = MIN_TXDELAY, .max = MAX_TXDELAY } } }; if (num_TxIntDelay > bd) { adapter->tx_int_delay = TxIntDelay[bd]; e1000_validate_option(&adapter->tx_int_delay, &opt, adapter); } else { adapter->tx_int_delay = opt.def; } } /* Transmit Absolute Interrupt Delay */ { static const struct e1000_option opt = { .type = range_option, .name = "Transmit Absolute Interrupt Delay", .err = "using default of " __MODULE_STRING(DEFAULT_TADV), .def = DEFAULT_TADV, .arg = { .r = { .min = MIN_TXABSDELAY, .max = MAX_TXABSDELAY } } }; if (num_TxAbsIntDelay > bd) { adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; e1000_validate_option(&adapter->tx_abs_int_delay, &opt, adapter); } else { adapter->tx_abs_int_delay = opt.def; } } /* Receive Interrupt Delay */ { static struct e1000_option opt = { .type = range_option, .name = "Receive Interrupt Delay", .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), .def = DEFAULT_RDTR, .arg = { .r = { .min = MIN_RXDELAY, .max = MAX_RXDELAY } } }; if (adapter->flags2 & FLAG2_DMA_BURST) opt.def = BURST_RDTR; if (num_RxIntDelay > bd) { adapter->rx_int_delay = RxIntDelay[bd]; e1000_validate_option(&adapter->rx_int_delay, &opt, adapter); } else { adapter->rx_int_delay = opt.def; } } /* Receive Absolute Interrupt Delay */ { static struct e1000_option opt = { .type = range_option, .name = "Receive Absolute Interrupt Delay", .err = "using default of " __MODULE_STRING(DEFAULT_RADV), .def = DEFAULT_RADV, .arg = { .r = { .min = MIN_RXABSDELAY, .max = MAX_RXABSDELAY } } }; if (adapter->flags2 & FLAG2_DMA_BURST) opt.def = BURST_RADV; if (num_RxAbsIntDelay > bd) { adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; e1000_validate_option(&adapter->rx_abs_int_delay, &opt, adapter); } else { adapter->rx_abs_int_delay = opt.def; } } /* Interrupt Throttling Rate */ { static const struct e1000_option opt = { .type = range_option, .name = "Interrupt Throttling Rate (ints/sec)", .err = "using default of " __MODULE_STRING(DEFAULT_ITR), .def = DEFAULT_ITR, .arg = { .r = { .min = MIN_ITR, .max = MAX_ITR } } }; if (num_InterruptThrottleRate > bd) { adapter->itr = InterruptThrottleRate[bd]; /* Make sure a message is printed for non-special * values. 
And in case of an invalid option, display * warning, use default and go through itr/itr_setting * adjustment logic below */ if ((adapter->itr > 4) && e1000_validate_option(&adapter->itr, &opt, adapter)) adapter->itr = opt.def; } else { /* If no option specified, use default value and go * through the logic below to adjust itr/itr_setting */ adapter->itr = opt.def; /* Make sure a message is printed for non-special * default values */ if (adapter->itr > 4) dev_info(&adapter->pdev->dev, "%s set to default %d\n", opt.name, adapter->itr); } adapter->itr_setting = adapter->itr; switch (adapter->itr) { case 0: dev_info(&adapter->pdev->dev, "%s turned off\n", opt.name); break; case 1: dev_info(&adapter->pdev->dev, "%s set to dynamic mode\n", opt.name); adapter->itr = 20000; break; case 2: dev_info(&adapter->pdev->dev, "%s Invalid mode - setting default\n", opt.name); adapter->itr_setting = opt.def; fallthrough; case 3: dev_info(&adapter->pdev->dev, "%s set to dynamic conservative mode\n", opt.name); adapter->itr = 20000; break; case 4: dev_info(&adapter->pdev->dev, "%s set to simplified (2000-8000 ints) mode\n", opt.name); break; default: /* Save the setting, because the dynamic bits * change itr. * * Clear the lower two bits because * they are used as control. */ adapter->itr_setting &= ~3; break; } } /* Interrupt Mode */ { static struct e1000_option opt = { .type = range_option, .name = "Interrupt Mode", #ifndef CONFIG_PCI_MSI .err = "defaulting to 0 (legacy)", .def = E1000E_INT_MODE_LEGACY, .arg = { .r = { .min = 0, .max = 0 } } #endif }; #ifdef CONFIG_PCI_MSI if (adapter->flags & FLAG_HAS_MSIX) { opt.err = kstrdup("defaulting to 2 (MSI-X)", GFP_KERNEL); opt.def = E1000E_INT_MODE_MSIX; opt.arg.r.max = E1000E_INT_MODE_MSIX; } else { opt.err = kstrdup("defaulting to 1 (MSI)", GFP_KERNEL); opt.def = E1000E_INT_MODE_MSI; opt.arg.r.max = E1000E_INT_MODE_MSI; } if (!opt.err) { dev_err(&adapter->pdev->dev, "Failed to allocate memory\n"); return; } #endif if (num_IntMode > bd) { unsigned int int_mode = IntMode[bd]; e1000_validate_option(&int_mode, &opt, adapter); adapter->int_mode = int_mode; } else { adapter->int_mode = opt.def; } #ifdef CONFIG_PCI_MSI kfree(opt.err); #endif } /* Smart Power Down */ { static const struct e1000_option opt = { .type = enable_option, .name = "PHY Smart Power Down", .err = "defaulting to Disabled", .def = OPTION_DISABLED }; if (num_SmartPowerDownEnable > bd) { unsigned int spd = SmartPowerDownEnable[bd]; e1000_validate_option(&spd, &opt, adapter); if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd) adapter->flags |= FLAG_SMART_POWER_DOWN; } } /* CRC Stripping */ { static const struct e1000_option opt = { .type = enable_option, .name = "CRC Stripping", .err = "defaulting to Enabled", .def = OPTION_ENABLED }; if (num_CrcStripping > bd) { unsigned int crc_stripping = CrcStripping[bd]; e1000_validate_option(&crc_stripping, &opt, adapter); if (crc_stripping == OPTION_ENABLED) { adapter->flags2 |= FLAG2_CRC_STRIPPING; adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING; } } else { adapter->flags2 |= FLAG2_CRC_STRIPPING; adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING; } } /* Kumeran Lock Loss Workaround */ { static const struct e1000_option opt = { .type = enable_option, .name = "Kumeran Lock Loss Workaround", .err = "defaulting to Enabled", .def = OPTION_ENABLED }; bool enabled = opt.def; if (num_KumeranLockLoss > bd) { unsigned int kmrn_lock_loss = KumeranLockLoss[bd]; e1000_validate_option(&kmrn_lock_loss, &opt, adapter); enabled = kmrn_lock_loss; } if (hw->mac.type == 
e1000_ich8lan) e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, enabled); } /* Write-protect NVM */ { static const struct e1000_option opt = { .type = enable_option, .name = "Write-protect NVM", .err = "defaulting to Enabled", .def = OPTION_ENABLED }; if (adapter->flags & FLAG_IS_ICH) { if (num_WriteProtectNVM > bd) { unsigned int write_protect_nvm = WriteProtectNVM[bd]; e1000_validate_option(&write_protect_nvm, &opt, adapter); if (write_protect_nvm) adapter->flags |= FLAG_READ_ONLY_NVM; } else { if (opt.def) adapter->flags |= FLAG_READ_ONLY_NVM; } } } }
linux-master
drivers/net/ethernet/intel/e1000e/param.c
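Editor's note (illustrative sketch, not part of the upstream file above): e1000e_check_options repeats one pattern per parameter: if the user supplied a value for this board index, run it through e1000_validate_option, otherwise take the option default, and on an out-of-range value fall back to the default with a message. The userspace program below strips that down to the range_option branch; the struct, bounds, and default are simplified stand-ins rather than the driver's definitions.

#include <stdio.h>

#define OPTION_UNSET (-1)

struct range_opt {
    const char *name;
    int def;
    int min;
    int max;
};

/* Fall back to the default for unset or out-of-range values, as the
 * range_option case in e1000_validate_option() does.
 */
static int validate_range(int value, const struct range_opt *opt)
{
    if (value == OPTION_UNSET)
        return opt->def;
    if (value >= opt->min && value <= opt->max) {
        printf("%s set to %d\n", opt->name, value);
        return value;
    }
    printf("Invalid %s value (%d), using default %d\n",
           opt->name, value, opt->def);
    return opt->def;
}

int main(void)
{
    /* Illustrative bounds only; the real TxIntDelay limits come from the
     * MIN_TXDELAY/MAX_TXDELAY macros in param.c.
     */
    const struct range_opt tidv = { "Transmit Interrupt Delay", 8, 0, 0xFFFF };

    printf("-> %d\n", validate_range(OPTION_UNSET, &tidv)); /* default: 8 */
    printf("-> %d\n", validate_range(32, &tidv));           /* accepted  */
    printf("-> %d\n", validate_range(-5, &tidv));           /* rejected  */
    return 0;
}

This is why every per-parameter block in e1000e_check_options first tests num_<param> > bd: only then is there a user-supplied array entry for this board to validate.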
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ /* 80003ES2LAN Gigabit Ethernet Controller (Copper) * 80003ES2LAN Gigabit Ethernet Controller (Serdes) */ #include "e1000.h" /* A table for the GG82563 cable length where the range is defined * with a lower bound at "index" and the upper bound at * "index + 5". */ static const u16 e1000_gg82563_cable_length_table[] = { 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; #define GG82563_CABLE_LENGTH_TABLE_SIZE \ ARRAY_SIZE(e1000_gg82563_cable_length_table) static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw); static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw); static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw); static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw); static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex); static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, u16 *data); static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, u16 data); static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw); /** * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. * @hw: pointer to the HW structure **/ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; if (hw->phy.media_type != e1000_media_type_copper) { phy->type = e1000_phy_none; return 0; } else { phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; } phy->addr = 1; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->reset_delay_us = 100; phy->type = e1000_phy_gg82563; /* This can only be done after all function pointers are setup. */ ret_val = e1000e_get_phy_id(hw); /* Verify phy id */ if (phy->id != GG82563_E_PHY_ID) return -E1000_ERR_PHY; return ret_val; } /** * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs. * @hw: pointer to the HW structure **/ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; u32 eecd = er32(EECD); u16 size; nvm->opcode_bits = 8; nvm->delay_usec = 1; switch (nvm->override) { case e1000_nvm_override_spi_large: nvm->page_size = 32; nvm->address_bits = 16; break; case e1000_nvm_override_spi_small: nvm->page_size = 8; nvm->address_bits = 8; break; default: nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; break; } nvm->type = e1000_nvm_eeprom_spi; size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> E1000_EECD_SIZE_EX_SHIFT); /* Added to a constant, "size" becomes the left-shift value * for setting word_size. */ size += NVM_WORD_SIZE_BASE_SHIFT; /* EEPROM access above 16k is unsupported */ if (size > 14) size = 14; nvm->word_size = BIT(size); return 0; } /** * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. 
* @hw: pointer to the HW structure **/ static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; /* Set media type and media-dependent function pointers */ switch (hw->adapter->pdev->device) { case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: hw->phy.media_type = e1000_media_type_internal_serdes; mac->ops.check_for_link = e1000e_check_for_serdes_link; mac->ops.setup_physical_interface = e1000e_setup_fiber_serdes_link; break; default: hw->phy.media_type = e1000_media_type_copper; mac->ops.check_for_link = e1000e_check_for_copper_link; mac->ops.setup_physical_interface = e1000_setup_copper_link_80003es2lan; break; } /* Set mta register count */ mac->mta_reg_count = 128; /* Set rar entry count */ mac->rar_entry_count = E1000_RAR_ENTRIES; /* FWSM register */ mac->has_fwsm = true; /* ARC supported; valid only if manageability features are enabled. */ mac->arc_subsystem_valid = !!(er32(FWSM) & E1000_FWSM_MODE_MASK); /* Adaptive IFS not supported */ mac->adaptive_ifs = false; /* set lan id for port to determine which phy lock to use */ hw->mac.ops.set_lan_id(hw); return 0; } static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; s32 rc; rc = e1000_init_mac_params_80003es2lan(hw); if (rc) return rc; rc = e1000_init_nvm_params_80003es2lan(hw); if (rc) return rc; rc = e1000_init_phy_params_80003es2lan(hw); if (rc) return rc; return 0; } /** * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY * @hw: pointer to the HW structure * * A wrapper to acquire access rights to the correct PHY. **/ static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) { u16 mask; mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; return e1000_acquire_swfw_sync_80003es2lan(hw, mask); } /** * e1000_release_phy_80003es2lan - Release rights to access PHY * @hw: pointer to the HW structure * * A wrapper to release access rights to the correct PHY. **/ static void e1000_release_phy_80003es2lan(struct e1000_hw *hw) { u16 mask; mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; e1000_release_swfw_sync_80003es2lan(hw, mask); } /** * e1000_acquire_mac_csr_80003es2lan - Acquire right to access Kumeran register * @hw: pointer to the HW structure * * Acquire the semaphore to access the Kumeran interface. * **/ static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw) { u16 mask; mask = E1000_SWFW_CSR_SM; return e1000_acquire_swfw_sync_80003es2lan(hw, mask); } /** * e1000_release_mac_csr_80003es2lan - Release right to access Kumeran Register * @hw: pointer to the HW structure * * Release the semaphore used to access the Kumeran interface **/ static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw) { u16 mask; mask = E1000_SWFW_CSR_SM; e1000_release_swfw_sync_80003es2lan(hw, mask); } /** * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM * @hw: pointer to the HW structure * * Acquire the semaphore to access the EEPROM. **/ static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) { s32 ret_val; ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); if (ret_val) return ret_val; ret_val = e1000e_acquire_nvm(hw); if (ret_val) e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); return ret_val; } /** * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM * @hw: pointer to the HW structure * * Release the semaphore used to access the EEPROM. 
**/ static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw) { e1000e_release_nvm(hw); e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); } /** * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore * @hw: pointer to the HW structure * @mask: specifies which semaphore to acquire * * Acquire the SW/FW semaphore to access the PHY or NVM. The mask * will also specify which port we're acquiring the lock for. **/ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) { u32 swfw_sync; u32 swmask = mask; u32 fwmask = mask << 16; s32 i = 0; s32 timeout = 50; while (i < timeout) { if (e1000e_get_hw_semaphore(hw)) return -E1000_ERR_SWFW_SYNC; swfw_sync = er32(SW_FW_SYNC); if (!(swfw_sync & (fwmask | swmask))) break; /* Firmware currently using resource (fwmask) * or other software thread using resource (swmask) */ e1000e_put_hw_semaphore(hw); mdelay(5); i++; } if (i == timeout) { e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); return -E1000_ERR_SWFW_SYNC; } swfw_sync |= swmask; ew32(SW_FW_SYNC, swfw_sync); e1000e_put_hw_semaphore(hw); return 0; } /** * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore * @hw: pointer to the HW structure * @mask: specifies which semaphore to acquire * * Release the SW/FW semaphore used to access the PHY or NVM. The mask * will also specify which port we're releasing the lock for. **/ static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) { u32 swfw_sync; while (e1000e_get_hw_semaphore(hw) != 0) ; /* Empty */ swfw_sync = er32(SW_FW_SYNC); swfw_sync &= ~mask; ew32(SW_FW_SYNC, swfw_sync); e1000e_put_hw_semaphore(hw); } /** * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register * @hw: pointer to the HW structure * @offset: offset of the register to read * @data: pointer to the data returned from the operation * * Read the GG82563 PHY register. **/ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; u32 page_select; u16 temp; ret_val = e1000_acquire_phy_80003es2lan(hw); if (ret_val) return ret_val; /* Select Configuration Page */ if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { page_select = GG82563_PHY_PAGE_SELECT; } else { /* Use Alternative Page Select register to access * registers 30 and 31 */ page_select = GG82563_PHY_PAGE_SELECT_ALT; } temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); if (ret_val) { e1000_release_phy_80003es2lan(hw); return ret_val; } if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { /* The "ready" bit in the MDIC register may be incorrectly set * before the device has completed the "Page Select" MDI * transaction. So we wait 200us after each MDI command... */ usleep_range(200, 400); /* ...and verify the command was successful. */ ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { e1000_release_phy_80003es2lan(hw); return -E1000_ERR_PHY; } usleep_range(200, 400); ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); usleep_range(200, 400); } else { ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); } e1000_release_phy_80003es2lan(hw); return ret_val; } /** * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register * @hw: pointer to the HW structure * @offset: offset of the register to read * @data: value to write to the register * * Write to the GG82563 PHY register. 
**/ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val; u32 page_select; u16 temp; ret_val = e1000_acquire_phy_80003es2lan(hw); if (ret_val) return ret_val; /* Select Configuration Page */ if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { page_select = GG82563_PHY_PAGE_SELECT; } else { /* Use Alternative Page Select register to access * registers 30 and 31 */ page_select = GG82563_PHY_PAGE_SELECT_ALT; } temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); if (ret_val) { e1000_release_phy_80003es2lan(hw); return ret_val; } if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { /* The "ready" bit in the MDIC register may be incorrectly set * before the device has completed the "Page Select" MDI * transaction. So we wait 200us after each MDI command... */ usleep_range(200, 400); /* ...and verify the command was successful. */ ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { e1000_release_phy_80003es2lan(hw); return -E1000_ERR_PHY; } usleep_range(200, 400); ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); usleep_range(200, 400); } else { ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); } e1000_release_phy_80003es2lan(hw); return ret_val; } /** * e1000_write_nvm_80003es2lan - Write to ESB2 NVM * @hw: pointer to the HW structure * @offset: offset of the register to read * @words: number of words to write * @data: buffer of data to write to the NVM * * Write "words" of data to the ESB2 NVM. **/ static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { return e1000e_write_nvm_spi(hw, offset, words, data); } /** * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete * @hw: pointer to the HW structure * * Wait a specific amount of time for manageability processes to complete. * This is a function pointer entry point called by the phy module. **/ static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) { s32 timeout = PHY_CFG_TIMEOUT; u32 mask = E1000_NVM_CFG_DONE_PORT_0; if (hw->bus.func == 1) mask = E1000_NVM_CFG_DONE_PORT_1; while (timeout) { if (er32(EEMNGCTL) & mask) break; usleep_range(1000, 2000); timeout--; } if (!timeout) { e_dbg("MNG configuration cycle has not completed.\n"); return -E1000_ERR_RESET; } return 0; } /** * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex * @hw: pointer to the HW structure * * Force the speed and duplex settings onto the PHY. This is a * function pointer entry point called by the phy module. **/ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) { s32 ret_val; u16 phy_data; bool link; /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI * forced whenever speed and duplex are forced. */ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO; ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; e_dbg("GG82563 PSCR: %X\n", phy_data); ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); if (ret_val) return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &phy_data); /* Reset the phy to commit changes. 
*/ phy_data |= BMCR_RESET; ret_val = e1e_wphy(hw, MII_BMCR, phy_data); if (ret_val) return ret_val; udelay(1); if (hw->phy.autoneg_wait_to_complete) { e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n"); ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; if (!link) { /* We didn't get link. * Reset the DSP and cross our fingers. */ ret_val = e1000e_phy_reset_dsp(hw); if (ret_val) return ret_val; } /* Try once more */ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; } ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; /* Resetting the phy means we need to verify the TX_CLK corresponds * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. */ phy_data &= ~GG82563_MSCR_TX_CLK_MASK; if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED) phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5; else phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; /* In addition, we must re-enable CRS on Tx for both half and full * duplex. */ phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data); return ret_val; } /** * e1000_get_cable_length_80003es2lan - Set approximate cable length * @hw: pointer to the HW structure * * Find the approximate cable length as measured by the GG82563 PHY. * This is a function pointer entry point called by the phy module. **/ static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, index; ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); if (ret_val) return ret_val; index = phy_data & GG82563_DSPD_CABLE_LENGTH; if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) return -E1000_ERR_PHY; phy->min_cable_length = e1000_gg82563_cable_length_table[index]; phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5]; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; return 0; } /** * e1000_get_link_up_info_80003es2lan - Report speed and duplex * @hw: pointer to the HW structure * @speed: pointer to speed buffer * @duplex: pointer to duplex buffer * * Retrieve the current speed and duplex configuration. **/ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, u16 *duplex) { s32 ret_val; if (hw->phy.media_type == e1000_media_type_copper) { ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); hw->phy.ops.cfg_on_link_up(hw); } else { ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, speed, duplex); } return ret_val; } /** * e1000_reset_hw_80003es2lan - Reset the ESB2 controller * @hw: pointer to the HW structure * * Perform a global reset to the ESB2 controller. **/ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; u16 kum_reg_data; /* Prevent the PCI-E bus from sticking if there is no TLP connection * on the last TLP read/write transaction when MAC is reset. 
*/ ret_val = e1000e_disable_pcie_master(hw); if (ret_val) e_dbg("PCI-E Master disable polling has failed.\n"); e_dbg("Masking off all interrupts\n"); ew32(IMC, 0xffffffff); ew32(RCTL, 0); ew32(TCTL, E1000_TCTL_PSP); e1e_flush(); usleep_range(10000, 11000); ctrl = er32(CTRL); ret_val = e1000_acquire_phy_80003es2lan(hw); if (ret_val) return ret_val; e_dbg("Issuing a global reset to MAC\n"); ew32(CTRL, ctrl | E1000_CTRL_RST); e1000_release_phy_80003es2lan(hw); /* Disable IBIST slave mode (far-end loopback) */ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, &kum_reg_data); if (!ret_val) { kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; ret_val = e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, kum_reg_data); if (ret_val) e_dbg("Error disabling far-end loopback\n"); } else { e_dbg("Error disabling far-end loopback\n"); } ret_val = e1000e_get_auto_rd_done(hw); if (ret_val) /* We don't want to continue accessing MAC registers. */ return ret_val; /* Clear any pending interrupt events. */ ew32(IMC, 0xffffffff); er32(ICR); return e1000_check_alt_mac_addr_generic(hw); } /** * e1000_init_hw_80003es2lan - Initialize the ESB2 controller * @hw: pointer to the HW structure * * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. **/ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 reg_data; s32 ret_val; u16 kum_reg_data; u16 i; e1000_initialize_hw_bits_80003es2lan(hw); /* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); /* An error is not fatal and we should not stop init due to this */ if (ret_val) e_dbg("Error initializing identification LED\n"); /* Disabling VLAN filtering */ e_dbg("Initializing the IEEE VLAN\n"); mac->ops.clear_vfta(hw); /* Setup the receive address. */ e1000e_init_rx_addrs(hw, mac->rar_entry_count); /* Zero out the Multicast HASH table */ e_dbg("Zeroing the MTA\n"); for (i = 0; i < mac->mta_reg_count; i++) E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); /* Setup link and flow control */ ret_val = mac->ops.setup_link(hw); if (ret_val) return ret_val; /* Disable IBIST slave mode (far-end loopback) */ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, &kum_reg_data); if (!ret_val) { kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; ret_val = e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, kum_reg_data); if (ret_val) e_dbg("Error disabling far-end loopback\n"); } else { e_dbg("Error disabling far-end loopback\n"); } /* Set the transmit descriptor write-back policy */ reg_data = er32(TXDCTL(0)); reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); ew32(TXDCTL(0), reg_data); /* ...for both queues. 
*/ reg_data = er32(TXDCTL(1)); reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); ew32(TXDCTL(1), reg_data); /* Enable retransmit on late collisions */ reg_data = er32(TCTL); reg_data |= E1000_TCTL_RTLC; ew32(TCTL, reg_data); /* Configure Gigabit Carry Extend Padding */ reg_data = er32(TCTL_EXT); reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN; ew32(TCTL_EXT, reg_data); /* Configure Transmit Inter-Packet Gap */ reg_data = er32(TIPG); reg_data &= ~E1000_TIPG_IPGT_MASK; reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; ew32(TIPG, reg_data); reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001); reg_data &= ~0x00100000; E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); /* default to true to enable the MDIC W/A */ hw->dev_spec.e80003es2lan.mdic_wa_enable = true; ret_val = e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >> E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i); if (!ret_val) { if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) hw->dev_spec.e80003es2lan.mdic_wa_enable = false; } /* Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there * is no link. */ e1000_clear_hw_cntrs_80003es2lan(hw); return ret_val; } /** * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2 * @hw: pointer to the HW structure * * Initializes required hardware-dependent bits needed for normal operation. **/ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) { u32 reg; /* Transmit Descriptor Control 0 */ reg = er32(TXDCTL(0)); reg |= BIT(22); ew32(TXDCTL(0), reg); /* Transmit Descriptor Control 1 */ reg = er32(TXDCTL(1)); reg |= BIT(22); ew32(TXDCTL(1), reg); /* Transmit Arbitration Control 0 */ reg = er32(TARC(0)); reg &= ~(0xF << 27); /* 30:27 */ if (hw->phy.media_type != e1000_media_type_copper) reg &= ~BIT(20); ew32(TARC(0), reg); /* Transmit Arbitration Control 1 */ reg = er32(TARC(1)); if (er32(TCTL) & E1000_TCTL_MULR) reg &= ~BIT(28); else reg |= BIT(28); ew32(TARC(1), reg); /* Disable IPv6 extension header parsing because some malformed * IPv6 headers can hang the Rx. */ reg = er32(RFCTL); reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); ew32(RFCTL, reg); } /** * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link * @hw: pointer to the HW structure * * Setup some GG82563 PHY registers for obtaining link **/ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u32 reg; u16 data; ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); if (ret_val) return ret_val; data |= GG82563_MSCR_ASSERT_CRS_ON_TX; /* Use 25MHz for both link down and 1000Base-T for Tx clock. 
*/ data |= GG82563_MSCR_TX_CLK_1000MBPS_25; ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data); if (ret_val) return ret_val; /* Options: * MDI/MDI-X = 0 (default) * 0 - Auto for all speeds * 1 - MDI mode * 2 - MDI-X mode * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) */ ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data); if (ret_val) return ret_val; data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; switch (phy->mdix) { case 1: data |= GG82563_PSCR_CROSSOVER_MODE_MDI; break; case 2: data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; break; case 0: default: data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; break; } /* Options: * disable_polarity_correction = 0 (default) * Automatic Correction for Reversed Cable Polarity * 0 - Disabled * 1 - Enabled */ data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; if (phy->disable_polarity_correction) data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data); if (ret_val) return ret_val; /* SW Reset the PHY so all changes take effect */ ret_val = hw->phy.ops.commit(hw); if (ret_val) { e_dbg("Error Resetting the PHY\n"); return ret_val; } /* Bypass Rx and Tx FIFO's */ reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL; data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); if (ret_val) return ret_val; reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE; ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data); if (ret_val) return ret_val; data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data); if (ret_val) return ret_val; data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data); if (ret_val) return ret_val; reg = er32(CTRL_EXT); reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK; ew32(CTRL_EXT, reg); ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); if (ret_val) return ret_val; /* Do not init these registers when the HW is in IAMT mode, since the * firmware will have already initialized them. We only initialize * them if the HW is not in IAMT mode. */ if (!hw->mac.ops.check_mng_mode(hw)) { /* Enable Electrical Idle on the PHY */ data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data); if (ret_val) return ret_val; data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data); if (ret_val) return ret_val; } /* Workaround: Disable padding in Kumeran interface in the MAC * and in the PHY to avoid CRC errors. */ ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data); if (ret_val) return ret_val; data |= GG82563_ICR_DIS_PADDING; ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data); if (ret_val) return ret_val; return 0; } /** * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2 * @hw: pointer to the HW structure * * Essentially a wrapper for setting up all things "copper" related. * This is a function pointer entry point called by the mac module. 
**/ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; u16 reg_data; ctrl = er32(CTRL); ctrl |= E1000_CTRL_SLU; ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); ew32(CTRL, ctrl); /* Set the mac to wait the maximum time between each * iteration and increase the max iterations when * polling the phy; this fixes erroneous timeouts at 10Mbps. */ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4), 0xFFFF); if (ret_val) return ret_val; ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), &reg_data); if (ret_val) return ret_val; reg_data |= 0x3F; ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), reg_data); if (ret_val) return ret_val; ret_val = e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, &reg_data); if (ret_val) return ret_val; reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; ret_val = e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, reg_data); if (ret_val) return ret_val; ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw); if (ret_val) return ret_val; return e1000e_setup_copper_link(hw); } /** * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up * @hw: pointer to the HW structure * * Configure the KMRN interface by applying last minute quirks for * 10/100 operation. **/ static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw) { s32 ret_val = 0; u16 speed; u16 duplex; if (hw->phy.media_type == e1000_media_type_copper) { ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex); if (ret_val) return ret_val; if (speed == SPEED_1000) ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw); else ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex); } return ret_val; } /** * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation * @hw: pointer to the HW structure * @duplex: current duplex setting * * Configure the KMRN interface by applying last minute quirks for * 10/100 operation. **/ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex) { s32 ret_val; u32 tipg; u32 i = 0; u16 reg_data, reg_data2; reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; ret_val = e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, reg_data); if (ret_val) return ret_val; /* Configure Transmit Inter-Packet Gap */ tipg = er32(TIPG); tipg &= ~E1000_TIPG_IPGT_MASK; tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN; ew32(TIPG, tipg); do { ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2); if (ret_val) return ret_val; i++; } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY)); if (duplex == HALF_DUPLEX) reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER; else reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); } /** * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation * @hw: pointer to the HW structure * * Configure the KMRN interface by applying last minute quirks for * gigabit operation. 
**/ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw) { s32 ret_val; u16 reg_data, reg_data2; u32 tipg; u32 i = 0; reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; ret_val = e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, reg_data); if (ret_val) return ret_val; /* Configure Transmit Inter-Packet Gap */ tipg = er32(TIPG); tipg &= ~E1000_TIPG_IPGT_MASK; tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; ew32(TIPG, tipg); do { ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2); if (ret_val) return ret_val; i++; } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY)); reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); } /** * e1000_read_kmrn_reg_80003es2lan - Read kumeran register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquire semaphore, then read the PHY register at offset * using the kumeran interface. The information retrieved is stored in data. * Release the semaphore before exiting. **/ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, u16 *data) { u32 kmrnctrlsta; s32 ret_val; ret_val = e1000_acquire_mac_csr_80003es2lan(hw); if (ret_val) return ret_val; kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; ew32(KMRNCTRLSTA, kmrnctrlsta); e1e_flush(); udelay(2); kmrnctrlsta = er32(KMRNCTRLSTA); *data = (u16)kmrnctrlsta; e1000_release_mac_csr_80003es2lan(hw); return ret_val; } /** * e1000_write_kmrn_reg_80003es2lan - Write kumeran register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquire semaphore, then write the data to PHY register * at the offset using the kumeran interface. Release semaphore * before exiting. **/ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, u16 data) { u32 kmrnctrlsta; s32 ret_val; ret_val = e1000_acquire_mac_csr_80003es2lan(hw); if (ret_val) return ret_val; kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | data; ew32(KMRNCTRLSTA, kmrnctrlsta); e1e_flush(); udelay(2); e1000_release_mac_csr_80003es2lan(hw); return ret_val; } /** * e1000_read_mac_addr_80003es2lan - Read device MAC address * @hw: pointer to the HW structure **/ static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw) { s32 ret_val; /* If there's an alternate MAC address place it in RAR0 * so that it will override the Si installed default perm * address. */ ret_val = e1000_check_alt_mac_addr_generic(hw); if (ret_val) return ret_val; return e1000_read_mac_addr_generic(hw); } /** * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down * @hw: pointer to the HW structure * * In the case of a PHY power down to save power, or to turn off link during a * driver unload, or wake on lan is not enabled, remove the link. **/ static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw) { /* If the management interface is not enabled, then power down */ if (!(hw->mac.ops.check_mng_mode(hw) || hw->phy.ops.check_reset_block(hw))) e1000_power_down_phy_copper(hw); } /** * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters * @hw: pointer to the HW structure * * Clears the hardware counters by reading the counter registers. 
**/ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) { e1000e_clear_hw_cntrs_base(hw); er32(PRC64); er32(PRC127); er32(PRC255); er32(PRC511); er32(PRC1023); er32(PRC1522); er32(PTC64); er32(PTC127); er32(PTC255); er32(PTC511); er32(PTC1023); er32(PTC1522); er32(ALGNERRC); er32(RXERRC); er32(TNCRS); er32(CEXTERR); er32(TSCTC); er32(TSCTFC); er32(MGTPRC); er32(MGTPDC); er32(MGTPTC); er32(IAC); er32(ICRXOC); er32(ICRXPTC); er32(ICRXATC); er32(ICTXPTC); er32(ICTXATC); er32(ICTXQEC); er32(ICTXQMTC); er32(ICRXDMTC); } static const struct e1000_mac_operations es2_mac_ops = { .read_mac_addr = e1000_read_mac_addr_80003es2lan, .id_led_init = e1000e_id_led_init_generic, .blink_led = e1000e_blink_led_generic, .check_mng_mode = e1000e_check_mng_mode_generic, /* check_for_link dependent on media type */ .cleanup_led = e1000e_cleanup_led_generic, .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, .get_bus_info = e1000e_get_bus_info_pcie, .set_lan_id = e1000_set_lan_id_multi_port_pcie, .get_link_up_info = e1000_get_link_up_info_80003es2lan, .led_on = e1000e_led_on_generic, .led_off = e1000e_led_off_generic, .update_mc_addr_list = e1000e_update_mc_addr_list_generic, .write_vfta = e1000_write_vfta_generic, .clear_vfta = e1000_clear_vfta_generic, .reset_hw = e1000_reset_hw_80003es2lan, .init_hw = e1000_init_hw_80003es2lan, .setup_link = e1000e_setup_link_generic, /* setup_physical_interface dependent on media type */ .setup_led = e1000e_setup_led_generic, .config_collision_dist = e1000e_config_collision_dist_generic, .rar_set = e1000e_rar_set_generic, .rar_get_count = e1000e_rar_get_count_generic, }; static const struct e1000_phy_operations es2_phy_ops = { .acquire = e1000_acquire_phy_80003es2lan, .check_polarity = e1000_check_polarity_m88, .check_reset_block = e1000e_check_reset_block_generic, .commit = e1000e_phy_sw_reset, .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan, .get_cfg_done = e1000_get_cfg_done_80003es2lan, .get_cable_length = e1000_get_cable_length_80003es2lan, .get_info = e1000e_get_phy_info_m88, .read_reg = e1000_read_phy_reg_gg82563_80003es2lan, .release = e1000_release_phy_80003es2lan, .reset = e1000e_phy_hw_reset_generic, .set_d0_lplu_state = NULL, .set_d3_lplu_state = e1000e_set_d3_lplu_state, .write_reg = e1000_write_phy_reg_gg82563_80003es2lan, .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, }; static const struct e1000_nvm_operations es2_nvm_ops = { .acquire = e1000_acquire_nvm_80003es2lan, .read = e1000e_read_nvm_eerd, .release = e1000_release_nvm_80003es2lan, .reload = e1000e_reload_nvm_generic, .update = e1000e_update_nvm_checksum_generic, .valid_led_default = e1000e_valid_led_default, .validate = e1000e_validate_nvm_checksum_generic, .write = e1000_write_nvm_80003es2lan, }; const struct e1000_info e1000_es2_info = { .mac = e1000_80003es2lan, .flags = FLAG_HAS_HW_VLAN_FILTER | FLAG_HAS_JUMBO_FRAMES | FLAG_HAS_WOL | FLAG_APME_IN_CTRL3 | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_RX_NEEDS_RESTART /* errata */ | FLAG_TARC_SET_BIT_ZERO /* errata */ | FLAG_APME_CHECK_PORT_B | FLAG_DISABLE_FC_PAUSE_TIME, /* errata */ .flags2 = FLAG2_DMA_BURST, .pba = 38, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_80003es2lan, .mac_ops = &es2_mac_ops, .phy_ops = &es2_phy_ops, .nvm_ops = &es2_nvm_ops, };
linux-master
drivers/net/ethernet/intel/e1000e/80003es2lan.c
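The 80003es2lan code above funnels every Kumeran (KMRN) interface access through the single 32-bit KMRNCTRLSTA register: the target offset is shifted into a masked field, a read-enable bit requests a read, and the data itself travels in the low 16 bits. The stand-alone sketch below isolates just that packing/unpacking step so it can be read without the surrounding semaphore and MMIO plumbing. The numeric values of the OFFSET/OFFSET_SHIFT/REN defines are assumptions mirrored from memory of the driver headers, and the offsets and readback value in main() are invented for illustration; this is not the driver code itself.

/*
 * Minimal host-side sketch of the KMRNCTRLSTA encoding used by
 * e1000_read_kmrn_reg_80003es2lan()/e1000_write_kmrn_reg_80003es2lan():
 * bits 20:16 carry the Kumeran register offset, bit 21 (REN) requests a
 * read, and the low 16 bits carry the data.  The define values below are
 * assumed to match the driver headers; nothing here touches hardware.
 */
#include <stdint.h>
#include <stdio.h>

#define KMRNCTRLSTA_OFFSET        0x001F0000u  /* assumed: offset field, bits 20:16 */
#define KMRNCTRLSTA_OFFSET_SHIFT  16           /* assumed: shift for the offset field */
#define KMRNCTRLSTA_REN           0x00200000u  /* assumed: read-enable bit */

/* Compose the word written to KMRNCTRLSTA to start a read of 'offset'. */
static uint32_t kmrn_read_cmd(uint32_t offset)
{
	return ((offset << KMRNCTRLSTA_OFFSET_SHIFT) & KMRNCTRLSTA_OFFSET) |
	       KMRNCTRLSTA_REN;
}

/* Compose the word written to KMRNCTRLSTA to write 'data' to 'offset'. */
static uint32_t kmrn_write_cmd(uint32_t offset, uint16_t data)
{
	return ((offset << KMRNCTRLSTA_OFFSET_SHIFT) & KMRNCTRLSTA_OFFSET) |
	       data;
}

int main(void)
{
	/* 0x09 is an arbitrary example offset, not a specific KMRN register. */
	uint32_t rd = kmrn_read_cmd(0x09);
	uint32_t wr = kmrn_write_cmd(0x09, 0x003F);
	uint32_t completed = 0x00291234u;  /* pretend value read back after the cycle */

	printf("read cmd  = 0x%08x\n", (unsigned)rd);
	printf("write cmd = 0x%08x\n", (unsigned)wr);
	/* As in the driver, the result of a completed read is simply the
	 * low 16 bits of the register.
	 */
	printf("data      = 0x%04x\n", (unsigned)(completed & 0xFFFF));
	return 0;
}

The real driver additionally brackets each access with the MAC CSR semaphore (e1000_acquire_mac_csr_80003es2lan) and a register flush plus a 2 microsecond delay before sampling the result; the sketch deliberately leaves that out to keep the bit layout in focus.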
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "e1000.h" /** * e1000_raise_eec_clk - Raise EEPROM clock * @hw: pointer to the HW structure * @eecd: pointer to the EEPROM * * Enable/Raise the EEPROM clock bit. **/ static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) { *eecd = *eecd | E1000_EECD_SK; ew32(EECD, *eecd); e1e_flush(); udelay(hw->nvm.delay_usec); } /** * e1000_lower_eec_clk - Lower EEPROM clock * @hw: pointer to the HW structure * @eecd: pointer to the EEPROM * * Clear/Lower the EEPROM clock bit. **/ static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) { *eecd = *eecd & ~E1000_EECD_SK; ew32(EECD, *eecd); e1e_flush(); udelay(hw->nvm.delay_usec); } /** * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM * @hw: pointer to the HW structure * @data: data to send to the EEPROM * @count: number of bits to shift out * * We need to shift 'count' bits out to the EEPROM. So, the value in the * "data" parameter will be shifted out to the EEPROM one bit at a time. * In order to do this, "data" must be broken down into bits. **/ static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) { struct e1000_nvm_info *nvm = &hw->nvm; u32 eecd = er32(EECD); u32 mask; mask = BIT(count - 1); if (nvm->type == e1000_nvm_eeprom_spi) eecd |= E1000_EECD_DO; do { eecd &= ~E1000_EECD_DI; if (data & mask) eecd |= E1000_EECD_DI; ew32(EECD, eecd); e1e_flush(); udelay(nvm->delay_usec); e1000_raise_eec_clk(hw, &eecd); e1000_lower_eec_clk(hw, &eecd); mask >>= 1; } while (mask); eecd &= ~E1000_EECD_DI; ew32(EECD, eecd); } /** * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM * @hw: pointer to the HW structure * @count: number of bits to shift in * * In order to read a register from the EEPROM, we need to shift 'count' bits * in from the EEPROM. Bits are "shifted in" by raising the clock input to * the EEPROM (setting the SK bit), and then reading the value of the data out * "DO" bit. During this "shifting in" process the data in "DI" bit should * always be clear. **/ static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) { u32 eecd; u32 i; u16 data; eecd = er32(EECD); eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); data = 0; for (i = 0; i < count; i++) { data <<= 1; e1000_raise_eec_clk(hw, &eecd); eecd = er32(EECD); eecd &= ~E1000_EECD_DI; if (eecd & E1000_EECD_DO) data |= 1; e1000_lower_eec_clk(hw, &eecd); } return data; } /** * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion * @hw: pointer to the HW structure * @ee_reg: EEPROM flag for polling * * Polls the EEPROM status bit for either read or write completion based * upon the value of 'ee_reg'. **/ s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) { u32 attempts = 100000; u32 i, reg = 0; for (i = 0; i < attempts; i++) { if (ee_reg == E1000_NVM_POLL_READ) reg = er32(EERD); else reg = er32(EEWR); if (reg & E1000_NVM_RW_REG_DONE) return 0; udelay(5); } return -E1000_ERR_NVM; } /** * e1000e_acquire_nvm - Generic request for access to EEPROM * @hw: pointer to the HW structure * * Set the EEPROM access request bit and wait for EEPROM access grant bit. * Return successful if access grant bit set, else clear the request for * EEPROM access and return -E1000_ERR_NVM (-1). 
**/ s32 e1000e_acquire_nvm(struct e1000_hw *hw) { u32 eecd = er32(EECD); s32 timeout = E1000_NVM_GRANT_ATTEMPTS; ew32(EECD, eecd | E1000_EECD_REQ); eecd = er32(EECD); while (timeout) { if (eecd & E1000_EECD_GNT) break; udelay(5); eecd = er32(EECD); timeout--; } if (!timeout) { eecd &= ~E1000_EECD_REQ; ew32(EECD, eecd); e_dbg("Could not acquire NVM grant\n"); return -E1000_ERR_NVM; } return 0; } /** * e1000_standby_nvm - Return EEPROM to standby state * @hw: pointer to the HW structure * * Return the EEPROM to a standby state. **/ static void e1000_standby_nvm(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; u32 eecd = er32(EECD); if (nvm->type == e1000_nvm_eeprom_spi) { /* Toggle CS to flush commands */ eecd |= E1000_EECD_CS; ew32(EECD, eecd); e1e_flush(); udelay(nvm->delay_usec); eecd &= ~E1000_EECD_CS; ew32(EECD, eecd); e1e_flush(); udelay(nvm->delay_usec); } } /** * e1000_stop_nvm - Terminate EEPROM command * @hw: pointer to the HW structure * * Terminates the current command by inverting the EEPROM's chip select pin. **/ static void e1000_stop_nvm(struct e1000_hw *hw) { u32 eecd; eecd = er32(EECD); if (hw->nvm.type == e1000_nvm_eeprom_spi) { /* Pull CS high */ eecd |= E1000_EECD_CS; e1000_lower_eec_clk(hw, &eecd); } } /** * e1000e_release_nvm - Release exclusive access to EEPROM * @hw: pointer to the HW structure * * Stop any current commands to the EEPROM and clear the EEPROM request bit. **/ void e1000e_release_nvm(struct e1000_hw *hw) { u32 eecd; e1000_stop_nvm(hw); eecd = er32(EECD); eecd &= ~E1000_EECD_REQ; ew32(EECD, eecd); } /** * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write * @hw: pointer to the HW structure * * Setups the EEPROM for reading and writing. **/ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; u32 eecd = er32(EECD); u8 spi_stat_reg; if (nvm->type == e1000_nvm_eeprom_spi) { u16 timeout = NVM_MAX_RETRY_SPI; /* Clear SK and CS */ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); ew32(EECD, eecd); e1e_flush(); udelay(1); /* Read "Status Register" repeatedly until the LSB is cleared. * The EEPROM will signal that the command has been completed * by clearing bit 0 of the internal status register. If it's * not cleared within 'timeout', then error out. */ while (timeout) { e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, hw->nvm.opcode_bits); spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) break; udelay(5); e1000_standby_nvm(hw); timeout--; } if (!timeout) { e_dbg("SPI NVM Status error\n"); return -E1000_ERR_NVM; } } return 0; } /** * e1000e_read_nvm_eerd - Reads EEPROM using EERD register * @hw: pointer to the HW structure * @offset: offset of word in the EEPROM to read * @words: number of words to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM using the EERD register. **/ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; u32 i, eerd = 0; s32 ret_val = 0; /* A check for invalid values: offset too large, too many words, * too many words for the offset, and not enough words. 
*/ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { e_dbg("nvm parameter(s) out of bounds\n"); return -E1000_ERR_NVM; } for (i = 0; i < words; i++) { eerd = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) + E1000_NVM_RW_REG_START; ew32(EERD, eerd); ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); if (ret_val) { e_dbg("NVM read error: %d\n", ret_val); break; } data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA); } return ret_val; } /** * e1000e_write_nvm_spi - Write to EEPROM using SPI * @hw: pointer to the HW structure * @offset: offset within the EEPROM to be written to * @words: number of words to write * @data: 16 bit word(s) to be written to the EEPROM * * Writes data to EEPROM at offset using SPI interface. * * If e1000e_update_nvm_checksum is not called after this function , the * EEPROM will most likely contain an invalid checksum. **/ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; s32 ret_val = -E1000_ERR_NVM; u16 widx = 0; /* A check for invalid values: offset too large, too many words, * and not enough words. */ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { e_dbg("nvm parameter(s) out of bounds\n"); return -E1000_ERR_NVM; } while (widx < words) { u8 write_opcode = NVM_WRITE_OPCODE_SPI; ret_val = nvm->ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000_ready_nvm_eeprom(hw); if (ret_val) { nvm->ops.release(hw); return ret_val; } e1000_standby_nvm(hw); /* Send the WRITE ENABLE command (8 bit opcode) */ e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, nvm->opcode_bits); e1000_standby_nvm(hw); /* Some SPI eeproms use the 8th address bit embedded in the * opcode */ if ((nvm->address_bits == 8) && (offset >= 128)) write_opcode |= NVM_A8_OPCODE_SPI; /* Send the Write command (8-bit opcode + addr) */ e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), nvm->address_bits); /* Loop to allow for up to whole page write of eeprom */ while (widx < words) { u16 word_out = data[widx]; word_out = (word_out >> 8) | (word_out << 8); e1000_shift_out_eec_bits(hw, word_out, 16); widx++; if ((((offset + widx) * 2) % nvm->page_size) == 0) { e1000_standby_nvm(hw); break; } } usleep_range(10000, 11000); nvm->ops.release(hw); } return ret_val; } /** * e1000_read_pba_string_generic - Read device part number * @hw: pointer to the HW structure * @pba_num: pointer to device part number * @pba_num_size: size of part number buffer * * Reads the product board assembly (PBA) number from the EEPROM and stores * the value in pba_num. 
**/ s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size) { s32 ret_val; u16 nvm_data; u16 pba_ptr; u16 offset; u16 length; if (pba_num == NULL) { e_dbg("PBA string buffer was null\n"); return -E1000_ERR_INVALID_ARGUMENT; } ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); if (ret_val) { e_dbg("NVM Read Error\n"); return ret_val; } ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); if (ret_val) { e_dbg("NVM Read Error\n"); return ret_val; } /* if nvm_data is not ptr guard the PBA must be in legacy format which * means pba_ptr is actually our second data word for the PBA number * and we can decode it into an ascii string */ if (nvm_data != NVM_PBA_PTR_GUARD) { e_dbg("NVM PBA number is not stored as string\n"); /* make sure callers buffer is big enough to store the PBA */ if (pba_num_size < E1000_PBANUM_LENGTH) { e_dbg("PBA string buffer too small\n"); return E1000_ERR_NO_SPACE; } /* extract hex string from data and pba_ptr */ pba_num[0] = (nvm_data >> 12) & 0xF; pba_num[1] = (nvm_data >> 8) & 0xF; pba_num[2] = (nvm_data >> 4) & 0xF; pba_num[3] = nvm_data & 0xF; pba_num[4] = (pba_ptr >> 12) & 0xF; pba_num[5] = (pba_ptr >> 8) & 0xF; pba_num[6] = '-'; pba_num[7] = 0; pba_num[8] = (pba_ptr >> 4) & 0xF; pba_num[9] = pba_ptr & 0xF; /* put a null character on the end of our string */ pba_num[10] = '\0'; /* switch all the data but the '-' to hex char */ for (offset = 0; offset < 10; offset++) { if (pba_num[offset] < 0xA) pba_num[offset] += '0'; else if (pba_num[offset] < 0x10) pba_num[offset] += 'A' - 0xA; } return 0; } ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length); if (ret_val) { e_dbg("NVM Read Error\n"); return ret_val; } if (length == 0xFFFF || length == 0) { e_dbg("NVM PBA number section invalid length\n"); return -E1000_ERR_NVM_PBA_SECTION; } /* check if pba_num buffer is big enough */ if (pba_num_size < (((u32)length * 2) - 1)) { e_dbg("PBA string buffer too small\n"); return -E1000_ERR_NO_SPACE; } /* trim pba length from start of string */ pba_ptr++; length--; for (offset = 0; offset < length; offset++) { ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data); if (ret_val) { e_dbg("NVM Read Error\n"); return ret_val; } pba_num[offset * 2] = (u8)(nvm_data >> 8); pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); } pba_num[offset * 2] = '\0'; return 0; } /** * e1000_read_mac_addr_generic - Read device MAC address * @hw: pointer to the HW structure * * Reads the device MAC address from the EEPROM and stores the value. * Since devices with two ports use the same EEPROM, we increment the * last bit in the MAC address for the second port. **/ s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) { u32 rar_high; u32 rar_low; u16 i; rar_high = er32(RAH(0)); rar_low = er32(RAL(0)); for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8)); for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8)); for (i = 0; i < ETH_ALEN; i++) hw->mac.addr[i] = hw->mac.perm_addr[i]; return 0; } /** * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum * @hw: pointer to the HW structure * * Calculates the EEPROM checksum by reading/adding each word of the EEPROM * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
**/ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) { s32 ret_val; u16 checksum = 0; u16 i, nvm_data; for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); if (ret_val) { e_dbg("NVM Read Error\n"); return ret_val; } checksum += nvm_data; } if (checksum != (u16)NVM_SUM) { e_dbg("NVM Checksum Invalid\n"); return -E1000_ERR_NVM; } return 0; } /** * e1000e_update_nvm_checksum_generic - Update EEPROM checksum * @hw: pointer to the HW structure * * Updates the EEPROM checksum by reading/adding each word of the EEPROM * up to the checksum. Then calculates the EEPROM checksum and writes the * value to the EEPROM. **/ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) { s32 ret_val; u16 checksum = 0; u16 i, nvm_data; for (i = 0; i < NVM_CHECKSUM_REG; i++) { ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); if (ret_val) { e_dbg("NVM Read Error while updating checksum.\n"); return ret_val; } checksum += nvm_data; } checksum = (u16)NVM_SUM - checksum; ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); if (ret_val) e_dbg("NVM Write Error while updating checksum.\n"); return ret_val; } /** * e1000e_reload_nvm_generic - Reloads EEPROM * @hw: pointer to the HW structure * * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the * extended control register. **/ void e1000e_reload_nvm_generic(struct e1000_hw *hw) { u32 ctrl_ext; usleep_range(10, 20); ctrl_ext = er32(CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_EE_RST; ew32(CTRL_EXT, ctrl_ext); e1e_flush(); }
linux-master
drivers/net/ethernet/intel/e1000e/nvm.c
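The nvm.c functions above enforce a simple software checksum rule: the 16-bit EEPROM words from offset 0 up to and including the checksum word must sum, modulo 2^16, to NVM_SUM (0xBABA), and the update path stores NVM_SUM minus the sum of the preceding words into the checksum word. The sketch below reproduces that arithmetic on a plain in-memory array so the rule can be verified in isolation. The NVM_CHECKSUM_REG value is an assumption mirrored from the driver headers and the sample word contents are invented; unlike e1000e_update_nvm_checksum_generic(), nothing is written back to an EEPROM.

/*
 * Stand-alone sketch of the checksum rule used by
 * e1000e_validate_nvm_checksum_generic()/e1000e_update_nvm_checksum_generic():
 * sum(words[0..NVM_CHECKSUM_REG]) must equal 0xBABA.  NVM_CHECKSUM_REG is
 * assumed to match the driver's define; the array stands in for real
 * EEPROM contents.
 */
#include <stdint.h>
#include <stdio.h>

#define NVM_CHECKSUM_REG 0x003F   /* assumed: word index holding the checksum */
#define NVM_SUM          0xBABA   /* required sum of words 0..NVM_CHECKSUM_REG */

/* Recompute the checksum word so the whole block sums to NVM_SUM. */
static uint16_t nvm_update_checksum(uint16_t *words)
{
	uint16_t sum = 0;

	for (int i = 0; i < NVM_CHECKSUM_REG; i++)
		sum += words[i];
	words[NVM_CHECKSUM_REG] = (uint16_t)(NVM_SUM - sum);
	return words[NVM_CHECKSUM_REG];
}

/* Return 1 if words 0..NVM_CHECKSUM_REG sum to NVM_SUM, else 0. */
static int nvm_checksum_ok(const uint16_t *words)
{
	uint16_t sum = 0;

	for (int i = 0; i <= NVM_CHECKSUM_REG; i++)
		sum += words[i];
	return sum == (uint16_t)NVM_SUM;
}

int main(void)
{
	/* Invented shadow contents; unlisted words default to zero. */
	uint16_t shadow[NVM_CHECKSUM_REG + 1] = { 0x8086, 0x1234, 0x00ff };

	printf("checksum word = 0x%04x\n", (unsigned)nvm_update_checksum(shadow));
	printf("valid = %d\n", nvm_checksum_ok(shadow));
	return 0;
}

In the driver the recomputed word is then written back with e1000_write_nvm() and the EEPROM is reloaded, which is why a write path that skips e1000e_update_nvm_checksum_generic() most likely leaves an invalid checksum behind, as the e1000e_write_nvm_spi() comment warns.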
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ /* 82562G 10/100 Network Connection * 82562G-2 10/100 Network Connection * 82562GT 10/100 Network Connection * 82562GT-2 10/100 Network Connection * 82562V 10/100 Network Connection * 82562V-2 10/100 Network Connection * 82566DC-2 Gigabit Network Connection * 82566DC Gigabit Network Connection * 82566DM-2 Gigabit Network Connection * 82566DM Gigabit Network Connection * 82566MC Gigabit Network Connection * 82566MM Gigabit Network Connection * 82567LM Gigabit Network Connection * 82567LF Gigabit Network Connection * 82567V Gigabit Network Connection * 82567LM-2 Gigabit Network Connection * 82567LF-2 Gigabit Network Connection * 82567V-2 Gigabit Network Connection * 82567LF-3 Gigabit Network Connection * 82567LM-3 Gigabit Network Connection * 82567LM-4 Gigabit Network Connection * 82577LM Gigabit Network Connection * 82577LC Gigabit Network Connection * 82578DM Gigabit Network Connection * 82578DC Gigabit Network Connection * 82579LM Gigabit Network Connection * 82579V Gigabit Network Connection * Ethernet Connection I217-LM * Ethernet Connection I217-V * Ethernet Connection I218-V * Ethernet Connection I218-LM * Ethernet Connection (2) I218-LM * Ethernet Connection (2) I218-V * Ethernet Connection (3) I218-LM * Ethernet Connection (3) I218-V */ #include "e1000.h" /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ /* Offset 04h HSFSTS */ union ich8_hws_flash_status { struct ich8_hsfsts { u16 flcdone:1; /* bit 0 Flash Cycle Done */ u16 flcerr:1; /* bit 1 Flash Cycle Error */ u16 dael:1; /* bit 2 Direct Access error Log */ u16 berasesz:2; /* bit 4:3 Sector Erase Size */ u16 flcinprog:1; /* bit 5 flash cycle in Progress */ u16 reserved1:2; /* bit 13:6 Reserved */ u16 reserved2:6; /* bit 13:6 Reserved */ u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */ u16 flockdn:1; /* bit 15 Flash Config Lock-Down */ } hsf_status; u16 regval; }; /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */ /* Offset 06h FLCTL */ union ich8_hws_flash_ctrl { struct ich8_hsflctl { u16 flcgo:1; /* 0 Flash Cycle Go */ u16 flcycle:2; /* 2:1 Flash Cycle */ u16 reserved:5; /* 7:3 Reserved */ u16 fldbcount:2; /* 9:8 Flash Data Byte Count */ u16 flockdn:6; /* 15:10 Reserved */ } hsf_ctrl; u16 regval; }; /* ICH Flash Region Access Permissions */ union ich8_hws_flash_regacc { struct ich8_flracc { u32 grra:8; /* 0:7 GbE region Read Access */ u32 grwa:8; /* 8:15 GbE region Write Access */ u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */ u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */ } hsf_flregacc; u16 regval; }; /* ICH Flash Protected Region */ union ich8_flash_protected_range { struct ich8_pr { u32 base:13; /* 0:12 Protected Range Base */ u32 reserved1:2; /* 13:14 Reserved */ u32 rpe:1; /* 15 Read Protection Enable */ u32 limit:13; /* 16:28 Protected Range Limit */ u32 reserved2:2; /* 29:30 Reserved */ u32 wpe:1; /* 31 Write Protection Enable */ } range; u32 regval; }; static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 byte); static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 *data); static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, u16 *data); static s32 e1000_read_flash_data_ich8lan(struct 
e1000_hw *hw, u32 offset, u8 size, u16 *data); static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, u32 *data); static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset, u32 *data); static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, u32 data); static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset, u32 dword); static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw); static s32 e1000_setup_led_pchlan(struct e1000_hw *hw); static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); static s32 e1000_led_on_pchlan(struct e1000_hw *hw); static s32 e1000_led_off_pchlan(struct e1000_hw *hw); static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw); static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force); static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw); static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state); static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) { return readw(hw->flash_address + reg); } static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg) { return readl(hw->flash_address + reg); } static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val) { writew(val, hw->flash_address + reg); } static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) { writel(val, hw->flash_address + reg); } #define er16flash(reg) __er16flash(hw, (reg)) #define er32flash(reg) __er32flash(hw, (reg)) #define ew16flash(reg, val) __ew16flash(hw, (reg), (val)) #define ew32flash(reg, val) __ew32flash(hw, (reg), (val)) /** * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers * @hw: pointer to the HW structure * * Test access to the PHY registers by reading the PHY ID registers. If * the PHY ID is already known (e.g. resume path) compare it with known ID, * otherwise assume the read PHY ID is correct if it is valid. * * Assumes the sw/fw/hw semaphore is already acquired. 
**/ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) { u16 phy_reg = 0; u32 phy_id = 0; s32 ret_val = 0; u16 retry_count; u32 mac_reg = 0; for (retry_count = 0; retry_count < 2; retry_count++) { ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg); if (ret_val || (phy_reg == 0xFFFF)) continue; phy_id = (u32)(phy_reg << 16); ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg); if (ret_val || (phy_reg == 0xFFFF)) { phy_id = 0; continue; } phy_id |= (u32)(phy_reg & PHY_REVISION_MASK); break; } if (hw->phy.id) { if (hw->phy.id == phy_id) goto out; } else if (phy_id) { hw->phy.id = phy_id; hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK); goto out; } /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ if (hw->mac.type < e1000_pch_lpt) { hw->phy.ops.release(hw); ret_val = e1000_set_mdio_slow_mode_hv(hw); if (!ret_val) ret_val = e1000e_get_phy_id(hw); hw->phy.ops.acquire(hw); } if (ret_val) return false; out: if (hw->mac.type >= e1000_pch_lpt) { /* Only unforce SMBus if ME is not active */ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { /* Unforce SMBus mode in PHY */ e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); /* Unforce SMBus mode in MAC */ mac_reg = er32(CTRL_EXT); mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; ew32(CTRL_EXT, mac_reg); } } return true; } /** * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value * @hw: pointer to the HW structure * * Toggling the LANPHYPC pin value fully power-cycles the PHY and is * used to reset the PHY to a quiescent state when necessary. **/ static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw) { u32 mac_reg; /* Set Phy Config Counter to 50msec */ mac_reg = er32(FEXTNVM3); mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; ew32(FEXTNVM3, mac_reg); /* Toggle LANPHYPC Value bit */ mac_reg = er32(CTRL); mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; ew32(CTRL, mac_reg); e1e_flush(); usleep_range(10, 20); mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; ew32(CTRL, mac_reg); e1e_flush(); if (hw->mac.type < e1000_pch_lpt) { msleep(50); } else { u16 count = 20; do { usleep_range(5000, 6000); } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--); msleep(30); } } /** * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds * @hw: pointer to the HW structure * * Workarounds/flow necessary for PHY initialization during driver load * and resume paths. **/ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) { struct e1000_adapter *adapter = hw->adapter; u32 mac_reg, fwsm = er32(FWSM); s32 ret_val; /* Gate automatic PHY configuration by hardware on managed and * non-managed 82579 and newer adapters. */ e1000_gate_hw_phy_config_ich8lan(hw, true); /* It is not possible to be certain of the current state of ULP * so forcibly disable it. */ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown; ret_val = e1000_disable_ulp_lpt_lp(hw, true); if (ret_val) e_warn("Failed to disable ULP\n"); ret_val = hw->phy.ops.acquire(hw); if (ret_val) { e_dbg("Failed to initialize PHY flow\n"); goto out; } /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is * inaccessible and resetting the PHY is not blocked, toggle the * LANPHYPC Value bit to force the interconnect to PCIe mode. 
*/ switch (hw->mac.type) { case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: if (e1000_phy_is_accessible_pchlan(hw)) break; /* Before toggling LANPHYPC, see if PHY is accessible by * forcing MAC to SMBus mode first. */ mac_reg = er32(CTRL_EXT); mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; ew32(CTRL_EXT, mac_reg); /* Wait 50 milliseconds for MAC to finish any retries * that it might be trying to perform from previous * attempts to acknowledge any phy read requests. */ msleep(50); fallthrough; case e1000_pch2lan: if (e1000_phy_is_accessible_pchlan(hw)) break; fallthrough; case e1000_pchlan: if ((hw->mac.type == e1000_pchlan) && (fwsm & E1000_ICH_FWSM_FW_VALID)) break; if (hw->phy.ops.check_reset_block(hw)) { e_dbg("Required LANPHYPC toggle blocked by ME\n"); ret_val = -E1000_ERR_PHY; break; } /* Toggle LANPHYPC Value bit */ e1000_toggle_lanphypc_pch_lpt(hw); if (hw->mac.type >= e1000_pch_lpt) { if (e1000_phy_is_accessible_pchlan(hw)) break; /* Toggling LANPHYPC brings the PHY out of SMBus mode * so ensure that the MAC is also out of SMBus mode */ mac_reg = er32(CTRL_EXT); mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; ew32(CTRL_EXT, mac_reg); if (e1000_phy_is_accessible_pchlan(hw)) break; ret_val = -E1000_ERR_PHY; } break; default: break; } hw->phy.ops.release(hw); if (!ret_val) { /* Check to see if able to reset PHY. Print error if not */ if (hw->phy.ops.check_reset_block(hw)) { e_err("Reset blocked by ME\n"); goto out; } /* Reset the PHY before any access to it. Doing so, ensures * that the PHY is in a known good state before we read/write * PHY registers. The generic reset is sufficient here, * because we haven't determined the PHY type yet. */ ret_val = e1000e_phy_hw_reset_generic(hw); if (ret_val) goto out; /* On a successful reset, possibly need to wait for the PHY * to quiesce to an accessible state before returning control * to the calling function. If the PHY does not quiesce, then * return E1000E_BLK_PHY_RESET, as this is the condition that * the PHY is in. */ ret_val = hw->phy.ops.check_reset_block(hw); if (ret_val) e_err("ME blocked access to PHY after reset\n"); } out: /* Ungate automatic PHY configuration on non-managed 82579 */ if ((hw->mac.type == e1000_pch2lan) && !(fwsm & E1000_ICH_FWSM_FW_VALID)) { usleep_range(10000, 11000); e1000_gate_hw_phy_config_ich8lan(hw, false); } return ret_val; } /** * e1000_init_phy_params_pchlan - Initialize PHY function pointers * @hw: pointer to the HW structure * * Initialize family-specific PHY parameters and function pointers. 
**/ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; phy->addr = 1; phy->reset_delay_us = 100; phy->ops.set_page = e1000_set_page_igp; phy->ops.read_reg = e1000_read_phy_reg_hv; phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; phy->ops.write_reg = e1000_write_phy_reg_hv; phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->id = e1000_phy_unknown; ret_val = e1000_init_phy_workarounds_pchlan(hw); if (ret_val) return ret_val; if (phy->id == e1000_phy_unknown) switch (hw->mac.type) { default: ret_val = e1000e_get_phy_id(hw); if (ret_val) return ret_val; if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) break; fallthrough; case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ ret_val = e1000_set_mdio_slow_mode_hv(hw); if (ret_val) return ret_val; ret_val = e1000e_get_phy_id(hw); if (ret_val) return ret_val; break; } phy->type = e1000e_get_phy_type_from_id(phy->id); switch (phy->type) { case e1000_phy_82577: case e1000_phy_82579: case e1000_phy_i217: phy->ops.check_polarity = e1000_check_polarity_82577; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577; phy->ops.get_cable_length = e1000_get_cable_length_82577; phy->ops.get_info = e1000_get_phy_info_82577; phy->ops.commit = e1000e_phy_sw_reset; break; case e1000_phy_82578: phy->ops.check_polarity = e1000_check_polarity_m88; phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; phy->ops.get_cable_length = e1000e_get_cable_length_m88; phy->ops.get_info = e1000e_get_phy_info_m88; break; default: ret_val = -E1000_ERR_PHY; break; } return ret_val; } /** * e1000_init_phy_params_ich8lan - Initialize PHY function pointers * @hw: pointer to the HW structure * * Initialize family-specific PHY parameters and function pointers. **/ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 i = 0; phy->addr = 1; phy->reset_delay_us = 100; phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; /* We may need to do this twice - once for IGP and if that fails, * we'll set BM func pointers and try again */ ret_val = e1000e_determine_phy_address(hw); if (ret_val) { phy->ops.write_reg = e1000e_write_phy_reg_bm; phy->ops.read_reg = e1000e_read_phy_reg_bm; ret_val = e1000e_determine_phy_address(hw); if (ret_val) { e_dbg("Cannot determine PHY addr. 
Erroring out\n"); return ret_val; } } phy->id = 0; while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && (i++ < 100)) { usleep_range(1000, 1100); ret_val = e1000e_get_phy_id(hw); if (ret_val) return ret_val; } /* Verify phy id */ switch (phy->id) { case IGP03E1000_E_PHY_ID: phy->type = e1000_phy_igp_3; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked; phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked; phy->ops.get_info = e1000e_get_phy_info_igp; phy->ops.check_polarity = e1000_check_polarity_igp; phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp; break; case IFE_E_PHY_ID: case IFE_PLUS_E_PHY_ID: case IFE_C_E_PHY_ID: phy->type = e1000_phy_ife; phy->autoneg_mask = E1000_ALL_NOT_GIG; phy->ops.get_info = e1000_get_phy_info_ife; phy->ops.check_polarity = e1000_check_polarity_ife; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; break; case BME1000_E_PHY_ID: phy->type = e1000_phy_bm; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->ops.read_reg = e1000e_read_phy_reg_bm; phy->ops.write_reg = e1000e_write_phy_reg_bm; phy->ops.commit = e1000e_phy_sw_reset; phy->ops.get_info = e1000e_get_phy_info_m88; phy->ops.check_polarity = e1000_check_polarity_m88; phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; break; default: return -E1000_ERR_PHY; } return 0; } /** * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers * @hw: pointer to the HW structure * * Initialize family-specific NVM parameters and function * pointers. **/ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 gfpreg, sector_base_addr, sector_end_addr; u16 i; u32 nvm_size; nvm->type = e1000_nvm_flash_sw; if (hw->mac.type >= e1000_pch_spt) { /* in SPT, gfpreg doesn't exist. NVM size is taken from the * STRAP register. This is because in SPT the GbE Flash region * is no longer accessed through the flash registers. Instead, * the mechanism has changed, and the Flash region access * registers are now implemented in GbE memory space. */ nvm->flash_base_addr = 0; nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1) * NVM_SIZE_MULTIPLIER; nvm->flash_bank_size = nvm_size / 2; /* Adjust to word count */ nvm->flash_bank_size /= sizeof(u16); /* Set the base address for flash register access */ hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR; } else { /* Can't read flash registers if register set isn't mapped. */ if (!hw->flash_address) { e_dbg("ERROR: Flash registers not mapped\n"); return -E1000_ERR_CONFIG; } gfpreg = er32flash(ICH_FLASH_GFPREG); /* sector_X_addr is a "sector"-aligned address (4096 bytes) * Add 1 to sector_end_addr since this sector is included in * the overall size. */ sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; /* flash_base_addr is byte-aligned */ nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; /* find total size of the NVM, then cut in half since the total * size represents two separate NVM banks. 
*/ nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) << FLASH_SECTOR_ADDR_SHIFT); nvm->flash_bank_size /= 2; /* Adjust to word count */ nvm->flash_bank_size /= sizeof(u16); } nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; /* Clear shadow ram */ for (i = 0; i < nvm->word_size; i++) { dev_spec->shadow_ram[i].modified = false; dev_spec->shadow_ram[i].value = 0xFFFF; } return 0; } /** * e1000_init_mac_params_ich8lan - Initialize MAC function pointers * @hw: pointer to the HW structure * * Initialize family-specific MAC parameters and function * pointers. **/ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; /* Set media type function pointer */ hw->phy.media_type = e1000_media_type_copper; /* Set mta register count */ mac->mta_reg_count = 32; /* Set rar entry count */ mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; if (mac->type == e1000_ich8lan) mac->rar_entry_count--; /* FWSM register */ mac->has_fwsm = true; /* ARC subsystem not supported */ mac->arc_subsystem_valid = false; /* Adaptive IFS supported */ mac->adaptive_ifs = true; /* LED and other operations */ switch (mac->type) { case e1000_ich8lan: case e1000_ich9lan: case e1000_ich10lan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; /* ID LED init */ mac->ops.id_led_init = e1000e_id_led_init_generic; /* blink LED */ mac->ops.blink_led = e1000e_blink_led_generic; /* setup LED */ mac->ops.setup_led = e1000e_setup_led_generic; /* cleanup LED */ mac->ops.cleanup_led = e1000_cleanup_led_ich8lan; /* turn on/off LED */ mac->ops.led_on = e1000_led_on_ich8lan; mac->ops.led_off = e1000_led_off_ich8lan; break; case e1000_pch2lan: mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES; mac->ops.rar_set = e1000_rar_set_pch2lan; fallthrough; case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: case e1000_pchlan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; /* ID LED init */ mac->ops.id_led_init = e1000_id_led_init_pchlan; /* setup LED */ mac->ops.setup_led = e1000_setup_led_pchlan; /* cleanup LED */ mac->ops.cleanup_led = e1000_cleanup_led_pchlan; /* turn on/off LED */ mac->ops.led_on = e1000_led_on_pchlan; mac->ops.led_off = e1000_led_off_pchlan; break; default: break; } if (mac->type >= e1000_pch_lpt) { mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; mac->ops.rar_set = e1000_rar_set_pch_lpt; mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt; mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt; } /* Enable PCS Lock-loss workaround for ICH8 */ if (mac->type == e1000_ich8lan) e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); return 0; } /** * __e1000_access_emi_reg_locked - Read/write EMI register * @hw: pointer to the HW structure * @address: EMI address to program * @data: pointer to value to read/write from/to the EMI address * @read: boolean flag to indicate read or write * * This helper function assumes the SW/FW/HW Semaphore is already acquired. 
**/ static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address, u16 *data, bool read) { s32 ret_val; ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address); if (ret_val) return ret_val; if (read) ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data); else ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data); return ret_val; } /** * e1000_read_emi_reg_locked - Read Extended Management Interface register * @hw: pointer to the HW structure * @addr: EMI address to program * @data: value to be read from the EMI address * * Assumes the SW/FW/HW Semaphore is already acquired. **/ s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data) { return __e1000_access_emi_reg_locked(hw, addr, data, true); } /** * e1000_write_emi_reg_locked - Write Extended Management Interface register * @hw: pointer to the HW structure * @addr: EMI address to program * @data: value to be written to the EMI address * * Assumes the SW/FW/HW Semaphore is already acquired. **/ s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) { return __e1000_access_emi_reg_locked(hw, addr, &data, false); } /** * e1000_set_eee_pchlan - Enable/disable EEE support * @hw: pointer to the HW structure * * Enable/disable EEE based on setting in dev_spec structure, the duplex of * the link and the EEE capabilities of the link partner. The LPI Control * register bits will remain set only if/when link is up. * * EEE LPI must not be asserted earlier than one second after link is up. * On 82579, EEE LPI should not be enabled until such time otherwise there * can be link issues with some switches. Other devices can have EEE LPI * enabled immediately upon link up since they have a timer in hardware which * prevents LPI from being asserted too early. **/ s32 e1000_set_eee_pchlan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; s32 ret_val; u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data; switch (hw->phy.type) { case e1000_phy_82579: lpa = I82579_EEE_LP_ABILITY; pcs_status = I82579_EEE_PCS_STATUS; adv_addr = I82579_EEE_ADVERTISEMENT; break; case e1000_phy_i217: lpa = I217_EEE_LP_ABILITY; pcs_status = I217_EEE_PCS_STATUS; adv_addr = I217_EEE_ADVERTISEMENT; break; default: return 0; } ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl); if (ret_val) goto release; /* Clear bits that enable EEE in various speeds */ lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK; /* Enable EEE if not disabled by user */ if (!dev_spec->eee_disable) { /* Save off link partner's EEE ability */ ret_val = e1000_read_emi_reg_locked(hw, lpa, &dev_spec->eee_lp_ability); if (ret_val) goto release; /* Read EEE advertisement */ ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv); if (ret_val) goto release; /* Enable EEE only for speeds in which the link partner is * EEE capable and for which we advertise EEE. */ if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED) lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) { e1e_rphy_locked(hw, MII_LPA, &data); if (data & LPA_100FULL) lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; else /* EEE is not supported in 100Half, so ignore * partner's EEE in 100 ability if full-duplex * is not advertised. 
*/ dev_spec->eee_lp_ability &= ~I82579_EEE_100_SUPPORTED; } } if (hw->phy.type == e1000_phy_82579) { ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, &data); if (ret_val) goto release; data &= ~I82579_LPI_100_PLL_SHUT; ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, data); } /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); if (ret_val) goto release; ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP * @hw: pointer to the HW structure * @link: link up bool flag * * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications * preventing further DMA write requests. Workaround the issue by disabling * the de-assertion of the clock request when in 1Gpbs mode. * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link * speeds in order to avoid Tx hangs. **/ static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) { u32 fextnvm6 = er32(FEXTNVM6); u32 status = er32(STATUS); s32 ret_val = 0; u16 reg; if (link && (status & E1000_STATUS_SPEED_1000)) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, &reg); if (ret_val) goto release; ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, reg & ~E1000_KMRNCTRLSTA_K1_ENABLE); if (ret_val) goto release; usleep_range(10, 20); ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK); ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, reg); release: hw->phy.ops.release(hw); } else { /* clear FEXTNVM6 bit 8 on link down or 10/100 */ fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; if ((hw->phy.revision > 5) || !link || ((status & E1000_STATUS_SPEED_100) && (status & E1000_STATUS_FD))) goto update_fextnvm6; ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg); if (ret_val) return ret_val; /* Clear link status transmit timeout */ reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK; if (status & E1000_STATUS_SPEED_100) { /* Set inband Tx timeout to 5x10us for 100Half */ reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; /* Do not extend the K1 entry latency for 100Half */ fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; } else { /* Set inband Tx timeout to 50x10us for 10Full/Half */ reg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; /* Extend the K1 entry latency for 10 Mbps */ fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; } ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg); if (ret_val) return ret_val; update_fextnvm6: ew32(FEXTNVM6, fextnvm6); } return ret_val; } /** * e1000_platform_pm_pch_lpt - Set platform power management values * @hw: pointer to the HW structure * @link: bool indicating link status * * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like" * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed * when link is up (which must not exceed the maximum latency supported * by the platform), otherwise specify there is no LTR requirement. * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop * latencies in the LTR Extended Capability Structure in the PCIe Extended * Capability register set, on this device LTR is set by writing the * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB) * message to the PMC. 
**/ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) { u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */ u32 lat_enc_d = 0; /* latency decoded */ u16 lat_enc = 0; /* latency encoded */ if (link) { u16 speed, duplex, scale = 0; u16 max_snoop, max_nosnoop; u16 max_ltr_enc; /* max LTR latency encoded */ u64 value; u32 rxa; if (!hw->adapter->max_frame_size) { e_dbg("max_frame_size not set.\n"); return -E1000_ERR_CONFIG; } hw->mac.ops.get_link_up_info(hw, &speed, &duplex); if (!speed) { e_dbg("Speed not set.\n"); return -E1000_ERR_CONFIG; } /* Rx Packet Buffer Allocation size (KB) */ rxa = er32(PBA) & E1000_PBA_RXA_MASK; /* Determine the maximum latency tolerated by the device. * * Per the PCIe spec, the tolerated latencies are encoded as * a 3-bit encoded scale (only 0-5 are valid) multiplied by * a 10-bit value (0-1023) to provide a range from 1 ns to * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns, * 1=2^5ns, 2=2^10ns,...5=2^25ns. */ rxa *= 512; value = (rxa > hw->adapter->max_frame_size) ? (rxa - hw->adapter->max_frame_size) * (16000 / speed) : 0; while (value > PCI_LTR_VALUE_MASK) { scale++; value = DIV_ROUND_UP(value, BIT(5)); } if (scale > E1000_LTRV_SCALE_MAX) { e_dbg("Invalid LTR latency scale %d\n", scale); return -E1000_ERR_CONFIG; } lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value); /* Determine the maximum latency tolerated by the platform */ pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT, &max_snoop); pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); max_ltr_enc = max_t(u16, max_snoop, max_nosnoop); lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) * (1U << (E1000_LTRV_SCALE_FACTOR * ((lat_enc & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT))); max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) * (1U << (E1000_LTRV_SCALE_FACTOR * ((max_ltr_enc & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT))); if (lat_enc_d > max_ltr_enc_d) lat_enc = max_ltr_enc; } /* Set Snoop and No-Snoop latencies the same */ reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT); ew32(LTRV, reg); return 0; } /** * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP * @hw: pointer to the HW structure * @to_sx: boolean indicating a system power state transition to Sx * * When link is down, configure ULP mode to significantly reduce the power * to the PHY. If on a Manageability Engine (ME) enabled system, tell the * ME firmware to start the ULP configuration. If not on an ME enabled * system, configure the ULP mode by software. 
*/ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) { u32 mac_reg; s32 ret_val = 0; u16 phy_reg; u16 oem_reg = 0; if ((hw->mac.type < e1000_pch_lpt) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) || (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on)) return 0; if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { /* Request ME configure ULP mode in the PHY */ mac_reg = er32(H2ME); mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS; ew32(H2ME, mac_reg); goto out; } if (!to_sx) { int i = 0; /* Poll up to 5 seconds for Cable Disconnected indication */ while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) { /* Bail if link is re-acquired */ if (er32(STATUS) & E1000_STATUS_LU) return -E1000_ERR_PHY; if (i++ == 100) break; msleep(50); } e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n", (er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50); } ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; /* Force SMBus mode in PHY */ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); if (ret_val) goto release; phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); /* Force SMBus mode in MAC */ mac_reg = er32(CTRL_EXT); mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; ew32(CTRL_EXT, mac_reg); /* Si workaround for ULP entry flow on i127/rev6 h/w. Enable * LPLU and disable Gig speed when entering ULP */ if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) { ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS, &oem_reg); if (ret_val) goto release; phy_reg = oem_reg; phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS; ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, phy_reg); if (ret_val) goto release; } /* Set Inband ULP Exit, Reset to SMBus mode and * Disable SMBus Release on PERST# in PHY */ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); if (ret_val) goto release; phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS | I218_ULP_CONFIG1_DISABLE_SMB_PERST); if (to_sx) { if (er32(WUFC) & E1000_WUFC_LNKC) phy_reg |= I218_ULP_CONFIG1_WOL_HOST; else phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST; phy_reg |= I218_ULP_CONFIG1_STICKY_ULP; phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT; } else { phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT; phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP; phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST; } e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); /* Set Disable SMBus Release on PERST# in MAC */ mac_reg = er32(FEXTNVM7); mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST; ew32(FEXTNVM7, mac_reg); /* Commit ULP changes in PHY by starting auto ULP configuration */ phy_reg |= I218_ULP_CONFIG1_START; e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) && to_sx && (er32(STATUS) & E1000_STATUS_LU)) { ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, oem_reg); if (ret_val) goto release; } release: hw->phy.ops.release(hw); out: if (ret_val) e_dbg("Error in ULP enable flow: %d\n", ret_val); else hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on; return ret_val; } /** * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP * @hw: pointer to the HW structure * @force: boolean indicating whether or not to force disabling ULP * * Un-configure ULP mode when link is up, the system is transitioned from * Sx or 
the driver is unloaded. If on a Manageability Engine (ME) enabled * system, poll for an indication from ME that ULP has been un-configured. * If not on an ME enabled system, un-configure the ULP mode by software. * * During nominal operation, this function is called when link is acquired * to disable ULP mode (force=false); otherwise, for example when unloading * the driver or during Sx->S0 transitions, this is called with force=true * to forcibly disable ULP. */ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) { s32 ret_val = 0; u32 mac_reg; u16 phy_reg; int i = 0; if ((hw->mac.type < e1000_pch_lpt) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) || (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off)) return 0; if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { struct e1000_adapter *adapter = hw->adapter; bool firmware_bug = false; if (force) { /* Request ME un-configure ULP mode in the PHY */ mac_reg = er32(H2ME); mac_reg &= ~E1000_H2ME_ULP; mac_reg |= E1000_H2ME_ENFORCE_SETTINGS; ew32(H2ME, mac_reg); } /* Poll up to 2.5 seconds for ME to clear ULP_CFG_DONE. * If this takes more than 1 second, show a warning indicating a * firmware bug */ while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) { if (i++ == 250) { ret_val = -E1000_ERR_PHY; goto out; } if (i > 100 && !firmware_bug) firmware_bug = true; usleep_range(10000, 11000); } if (firmware_bug) e_warn("ULP_CONFIG_DONE took %d msec. This is a firmware bug\n", i * 10); else e_dbg("ULP_CONFIG_DONE cleared after %d msec\n", i * 10); if (force) { mac_reg = er32(H2ME); mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS; ew32(H2ME, mac_reg); } else { /* Clear H2ME.ULP after ME ULP configuration */ mac_reg = er32(H2ME); mac_reg &= ~E1000_H2ME_ULP; ew32(H2ME, mac_reg); } goto out; } ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; if (force) /* Toggle LANPHYPC Value bit */ e1000_toggle_lanphypc_pch_lpt(hw); /* Unforce SMBus mode in PHY */ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); if (ret_val) { /* The MAC might be in PCIe mode, so temporarily force to * SMBus mode in order to access the PHY. */ mac_reg = er32(CTRL_EXT); mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; ew32(CTRL_EXT, mac_reg); msleep(50); ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); if (ret_val) goto release; } phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); /* Unforce SMBus mode in MAC */ mac_reg = er32(CTRL_EXT); mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; ew32(CTRL_EXT, mac_reg); /* When ULP mode was previously entered, K1 was disabled by the * hardware. Re-Enable K1 in the PHY when exiting ULP. 
*/ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg); if (ret_val) goto release; phy_reg |= HV_PM_CTRL_K1_ENABLE; e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg); /* Clear ULP enabled configuration */ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); if (ret_val) goto release; phy_reg &= ~(I218_ULP_CONFIG1_IND | I218_ULP_CONFIG1_STICKY_ULP | I218_ULP_CONFIG1_RESET_TO_SMBUS | I218_ULP_CONFIG1_WOL_HOST | I218_ULP_CONFIG1_INBAND_EXIT | I218_ULP_CONFIG1_EN_ULP_LANPHYPC | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST | I218_ULP_CONFIG1_DISABLE_SMB_PERST); e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); /* Commit ULP changes by starting auto ULP configuration */ phy_reg |= I218_ULP_CONFIG1_START; e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); /* Clear Disable SMBus Release on PERST# in MAC */ mac_reg = er32(FEXTNVM7); mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST; ew32(FEXTNVM7, mac_reg); release: hw->phy.ops.release(hw); if (force) { e1000_phy_hw_reset(hw); msleep(50); } out: if (ret_val) e_dbg("Error in ULP disable flow: %d\n", ret_val); else hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off; return ret_val; } /** * e1000_check_for_copper_link_ich8lan - Check for link (Copper) * @hw: pointer to the HW structure * * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. **/ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val, tipg_reg = 0; u16 emi_addr, emi_val = 0; bool link; u16 phy_reg; /* We only want to go out to the PHY registers to see if Auto-Neg * has completed and/or if our link status has changed. The * get_link_status flag is set upon receiving a Link Status * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) return 0; mac->get_link_status = false; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex * of the PHY. */ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) goto out; if (hw->mac.type == e1000_pchlan) { ret_val = e1000_k1_gig_workaround_hv(hw, link); if (ret_val) goto out; } /* When connected at 10Mbps half-duplex, some parts are excessively * aggressive resulting in many collisions. To avoid this, increase * the IPG and reduce Rx latency in the PHY. 
*/ if ((hw->mac.type >= e1000_pch2lan) && link) { u16 speed, duplex; e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex); tipg_reg = er32(TIPG); tipg_reg &= ~E1000_TIPG_IPGT_MASK; if (duplex == HALF_DUPLEX && speed == SPEED_10) { tipg_reg |= 0xFF; /* Reduce Rx latency in analog PHY */ emi_val = 0; } else if (hw->mac.type >= e1000_pch_spt && duplex == FULL_DUPLEX && speed != SPEED_1000) { tipg_reg |= 0xC; emi_val = 1; } else { /* Roll back the default values */ tipg_reg |= 0x08; emi_val = 1; } ew32(TIPG, tipg_reg); ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; if (hw->mac.type == e1000_pch2lan) emi_addr = I82579_RX_CONFIG; else emi_addr = I217_RX_CONFIG; ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val); if (hw->mac.type >= e1000_pch_lpt) { u16 phy_reg; e1e_rphy_locked(hw, I217_PLL_CLOCK_GATE_REG, &phy_reg); phy_reg &= ~I217_PLL_CLOCK_GATE_MASK; if (speed == SPEED_100 || speed == SPEED_10) phy_reg |= 0x3E8; else phy_reg |= 0xFA; e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg); if (speed == SPEED_1000) { hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL, &phy_reg); phy_reg |= HV_PM_CTRL_K1_CLK_REQ; hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL, phy_reg); } } hw->phy.ops.release(hw); if (ret_val) goto out; if (hw->mac.type >= e1000_pch_spt) { u16 data; u16 ptr_gap; if (speed == SPEED_1000) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; ret_val = e1e_rphy_locked(hw, PHY_REG(776, 20), &data); if (ret_val) { hw->phy.ops.release(hw); goto out; } ptr_gap = (data & (0x3FF << 2)) >> 2; if (ptr_gap < 0x18) { data &= ~(0x3FF << 2); data |= (0x18 << 2); ret_val = e1e_wphy_locked(hw, PHY_REG(776, 20), data); } hw->phy.ops.release(hw); if (ret_val) goto out; } else { ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; ret_val = e1e_wphy_locked(hw, PHY_REG(776, 20), 0xC023); hw->phy.ops.release(hw); if (ret_val) goto out; } } } /* I217 Packet Loss issue: * ensure that FEXTNVM4 Beacon Duration is set correctly * on power up. 
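 * The fix below is a plain read-modify-write of FEXTNVM4: the
 * BEACON_DURATION field is masked off and the new duration ORed back
 * in. (e1000_k1_workaround_lv() further down appears to use the same
 * pattern to select the 16 usec duration for 82579 at 10 Mbps.)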
* Set the Beacon Duration for I217 to 8 usec */ if (hw->mac.type >= e1000_pch_lpt) { u32 mac_reg; mac_reg = er32(FEXTNVM4); mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; ew32(FEXTNVM4, mac_reg); } /* Work-around I218 hang issue */ if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { ret_val = e1000_k1_workaround_lpt_lp(hw, link); if (ret_val) goto out; } if (hw->mac.type >= e1000_pch_lpt) { /* Set platform power management values for * Latency Tolerance Reporting (LTR) */ ret_val = e1000_platform_pm_pch_lpt(hw, link); if (ret_val) goto out; } /* Clear link partner's EEE ability */ hw->dev_spec.ich8lan.eee_lp_ability = 0; if (hw->mac.type >= e1000_pch_lpt) { u32 fextnvm6 = er32(FEXTNVM6); if (hw->mac.type == e1000_pch_spt) { /* FEXTNVM6 K1-off workaround - for SPT only */ u32 pcieanacfg = er32(PCIEANACFG); if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE; else fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; } ew32(FEXTNVM6, fextnvm6); } if (!link) goto out; switch (hw->mac.type) { case e1000_pch2lan: ret_val = e1000_k1_workaround_lv(hw); if (ret_val) return ret_val; fallthrough; case e1000_pchlan: if (hw->phy.type == e1000_phy_82578) { ret_val = e1000_link_stall_workaround_hv(hw); if (ret_val) return ret_val; } /* Workaround for PCHx parts in half-duplex: * Set the number of preambles removed from the packet * when it is passed from the PHY to the MAC to prevent * the MAC from misinterpreting the packet type. */ e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg); phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD) phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); break; default: break; } /* Check if there was DownShift, must be checked * immediately after link-up */ e1000e_check_downshift(hw); /* Enable/Disable EEE after link up */ if (hw->phy.type > e1000_phy_82579) { ret_val = e1000_set_eee_pchlan(hw); if (ret_val) return ret_val; } /* If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. */ if (!mac->autoneg) return -E1000_ERR_CONFIG; /* Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to * configure Collision Distance in the MAC. */ mac->ops.config_collision_dist(hw); /* Configure Flow Control now that Auto-Neg has completed. * First, we need to restore the desired flow control * settings because we may have had to re-autoneg with a * different link partner. 
*/ ret_val = e1000e_config_fc_after_link_up(hw); if (ret_val) e_dbg("Error configuring flow control\n"); return ret_val; out: mac->get_link_status = true; return ret_val; } static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; s32 rc; rc = e1000_init_mac_params_ich8lan(hw); if (rc) return rc; rc = e1000_init_nvm_params_ich8lan(hw); if (rc) return rc; switch (hw->mac.type) { case e1000_ich8lan: case e1000_ich9lan: case e1000_ich10lan: rc = e1000_init_phy_params_ich8lan(hw); break; case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: rc = e1000_init_phy_params_pchlan(hw); break; default: break; } if (rc) return rc; /* Disable Jumbo Frame support on parts with Intel 10/100 PHY or * on parts with MACsec enabled in NVM (reflected in CTRL_EXT). */ if ((adapter->hw.phy.type == e1000_phy_ife) || ((adapter->hw.mac.type >= e1000_pch2lan) && (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) { adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; hw->mac.ops.blink_led = NULL; } if ((adapter->hw.mac.type == e1000_ich8lan) && (adapter->hw.phy.type != e1000_phy_ife)) adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; /* Enable workaround for 82579 w/ ME enabled */ if ((adapter->hw.mac.type == e1000_pch2lan) && (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA; return 0; } static DEFINE_MUTEX(nvm_mutex); /** * e1000_acquire_nvm_ich8lan - Acquire NVM mutex * @hw: pointer to the HW structure * * Acquires the mutex for performing NVM operations. **/ static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw) { mutex_lock(&nvm_mutex); return 0; } /** * e1000_release_nvm_ich8lan - Release NVM mutex * @hw: pointer to the HW structure * * Releases the mutex used while performing NVM operations. **/ static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw) { mutex_unlock(&nvm_mutex); } /** * e1000_acquire_swflag_ich8lan - Acquire software control flag * @hw: pointer to the HW structure * * Acquires the software control flag for performing PHY and select * MAC CSR accesses. 
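 *
 * Sketch of the two-phase handshake implemented below:
 *
 *	1. take the driver-local __E1000_ACCESS_SHARED_RESOURCE bit
 *	2. poll EXTCNF_CTRL until SWFLAG reads clear (up to PHY_CFG_TIMEOUT ms)
 *	3. set SWFLAG, then poll until it reads back set (up to SW_FLAG_TIMEOUT ms)
 *	4. on contention or timeout, undo what was taken and return an error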
**/ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) { u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; s32 ret_val = 0; if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state)) { e_dbg("contention for Phy access\n"); return -E1000_ERR_PHY; } while (timeout) { extcnf_ctrl = er32(EXTCNF_CTRL); if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) break; mdelay(1); timeout--; } if (!timeout) { e_dbg("SW has already locked the resource.\n"); ret_val = -E1000_ERR_CONFIG; goto out; } timeout = SW_FLAG_TIMEOUT; extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; ew32(EXTCNF_CTRL, extcnf_ctrl); while (timeout) { extcnf_ctrl = er32(EXTCNF_CTRL); if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) break; mdelay(1); timeout--; } if (!timeout) { e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n", er32(FWSM), extcnf_ctrl); extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; ew32(EXTCNF_CTRL, extcnf_ctrl); ret_val = -E1000_ERR_CONFIG; goto out; } out: if (ret_val) clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); return ret_val; } /** * e1000_release_swflag_ich8lan - Release software control flag * @hw: pointer to the HW structure * * Releases the software control flag for performing PHY and select * MAC CSR accesses. **/ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) { u32 extcnf_ctrl; extcnf_ctrl = er32(EXTCNF_CTRL); if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) { extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; ew32(EXTCNF_CTRL, extcnf_ctrl); } else { e_dbg("Semaphore unexpectedly released by sw/fw/hw\n"); } clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); } /** * e1000_check_mng_mode_ich8lan - Checks management mode * @hw: pointer to the HW structure * * This checks if the adapter has any manageability enabled. * This is a function pointer entry point only called by read/write * routines for the PHY and NVM parts. **/ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) { u32 fwsm; fwsm = er32(FWSM); return (fwsm & E1000_ICH_FWSM_FW_VALID) && ((fwsm & E1000_FWSM_MODE_MASK) == (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); } /** * e1000_check_mng_mode_pchlan - Checks management mode * @hw: pointer to the HW structure * * This checks if the adapter has iAMT enabled. * This is a function pointer entry point only called by read/write * routines for the PHY and NVM parts. **/ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) { u32 fwsm; fwsm = er32(FWSM); return (fwsm & E1000_ICH_FWSM_FW_VALID) && (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); } /** * e1000_rar_set_pch2lan - Set receive address register * @hw: pointer to the HW structure * @addr: pointer to the receive address * @index: receive address array register * * Sets the receive address array register at index to the address passed * in by addr. For 82579, RAR[0] is the base address register that is to * contain the MAC address but RAR[1-6] are reserved for manageability (ME). * Use SHRA[0-3] in place of those reserved for ME. 
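 *
 * Worked example of the little-endian packing done below (illustrative
 * only): for the address 00:11:22:33:44:55,
 *
 *	rar_low  = 0x33221100
 *	rar_high = 0x00005544 (plus E1000_RAH_AV, since the address is non-zero)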
**/ static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; /* HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); /* If MAC address zero, no need to set the AV bit */ if (rar_low || rar_high) rar_high |= E1000_RAH_AV; if (index == 0) { ew32(RAL(index), rar_low); e1e_flush(); ew32(RAH(index), rar_high); e1e_flush(); return 0; } /* RAR[1-6] are owned by manageability. Skip those and program the * next address into the SHRA register array. */ if (index < (u32)(hw->mac.rar_entry_count)) { s32 ret_val; ret_val = e1000_acquire_swflag_ich8lan(hw); if (ret_val) goto out; ew32(SHRAL(index - 1), rar_low); e1e_flush(); ew32(SHRAH(index - 1), rar_high); e1e_flush(); e1000_release_swflag_ich8lan(hw); /* verify the register updates */ if ((er32(SHRAL(index - 1)) == rar_low) && (er32(SHRAH(index - 1)) == rar_high)) return 0; e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n", (index - 1), er32(FWSM)); } out: e_dbg("Failed to write receive address at index %d\n", index); return -E1000_ERR_CONFIG; } /** * e1000_rar_get_count_pch_lpt - Get the number of available SHRA * @hw: pointer to the HW structure * * Get the number of available receive registers that the Host can * program. SHRA[0-10] are the shared receive address registers * that are shared between the Host and manageability engine (ME). * ME can reserve any number of addresses and the host needs to be * able to tell how many available registers it has access to. **/ static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw) { u32 wlock_mac; u32 num_entries; wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK; wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; switch (wlock_mac) { case 0: /* All SHRA[0..10] and RAR[0] available */ num_entries = hw->mac.rar_entry_count; break; case 1: /* Only RAR[0] available */ num_entries = 1; break; default: /* SHRA[0..(wlock_mac - 1)] available + RAR[0] */ num_entries = wlock_mac + 1; break; } return num_entries; } /** * e1000_rar_set_pch_lpt - Set receive address registers * @hw: pointer to the HW structure * @addr: pointer to the receive address * @index: receive address array register * * Sets the receive address register array at index to the address passed * in by addr. For LPT, RAR[0] is the base address register that is to * contain the MAC address. SHRA[0-10] are the shared receive address * registers that are shared between the Host and manageability engine (ME). **/ static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; u32 wlock_mac; /* HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); /* If MAC address zero, no need to set the AV bit */ if (rar_low || rar_high) rar_high |= E1000_RAH_AV; if (index == 0) { ew32(RAL(index), rar_low); e1e_flush(); ew32(RAH(index), rar_high); e1e_flush(); return 0; } /* The manageability engine (ME) can lock certain SHRAR registers that * it is using - those registers are unavailable for use. 
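 *
 * The FWSM.WLOCK_MAC field read below encodes how much ME has claimed,
 * mirroring e1000_rar_get_count_pch_lpt() above:
 *
 *	wlock_mac == 0 -> RAR[0] and every SHRA register are usable
 *	wlock_mac == 1 -> only RAR[0] is usable, all SHRA registers are locked
 *	wlock_mac == n -> RAR[0] plus SHRA[0..n-1] are usable, so a given
 *	                  index is written only when index <= wlock_mac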
*/ if (index < hw->mac.rar_entry_count) { wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK; wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; /* Check if all SHRAR registers are locked */ if (wlock_mac == 1) goto out; if ((wlock_mac == 0) || (index <= wlock_mac)) { s32 ret_val; ret_val = e1000_acquire_swflag_ich8lan(hw); if (ret_val) goto out; ew32(SHRAL_PCH_LPT(index - 1), rar_low); e1e_flush(); ew32(SHRAH_PCH_LPT(index - 1), rar_high); e1e_flush(); e1000_release_swflag_ich8lan(hw); /* verify the register updates */ if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) && (er32(SHRAH_PCH_LPT(index - 1)) == rar_high)) return 0; } } out: e_dbg("Failed to write receive address at index %d\n", index); return -E1000_ERR_CONFIG; } /** * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked * @hw: pointer to the HW structure * * Checks if firmware is blocking the reset of the PHY. * This is a function pointer entry point only called by * reset routines. **/ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) { bool blocked = false; int i = 0; while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) && (i++ < 30)) usleep_range(10000, 11000); return blocked ? E1000_BLK_PHY_RESET : 0; } /** * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states * @hw: pointer to the HW structure * * Assumes semaphore already acquired. * **/ static s32 e1000_write_smbus_addr(struct e1000_hw *hw) { u16 phy_data; u32 strap = er32(STRAP); u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> E1000_STRAP_SMT_FREQ_SHIFT; s32 ret_val; strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); if (ret_val) return ret_val; phy_data &= ~HV_SMB_ADDR_MASK; phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; if (hw->phy.type == e1000_phy_i217) { /* Restore SMBus frequency */ if (freq--) { phy_data &= ~HV_SMB_ADDR_FREQ_MASK; phy_data |= (freq & BIT(0)) << HV_SMB_ADDR_FREQ_LOW_SHIFT; phy_data |= (freq & BIT(1)) << (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); } else { e_dbg("Unsupported SMB frequency in PHY\n"); } } return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); } /** * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration * @hw: pointer to the HW structure * * SW should configure the LCD from the NVM extended configuration region * as a workaround for certain parts. **/ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; s32 ret_val = 0; u16 word_addr, reg_data, reg_addr, phy_page = 0; /* Initialize the PHY from the NVM on ICH platforms. This * is needed due to an issue where the NVM configuration is * not properly autoloaded after power transitions. * Therefore, after each PHY reset, we will load the * configuration data out of the NVM manually. 
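 *
 * Layout of the extended configuration region as the code below walks
 * it: the pointer in EXTCNF_CTRL is a DWORD offset (hence the shift to
 * a word address), and each record is a word pair
 *
 *	word 2*i     -> value to write (reg_data)
 *	word 2*i + 1 -> PHY register address (reg_addr); an
 *	                IGP01E1000_PHY_PAGE_SELECT entry only updates the
 *	                page used for the writes that follow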
*/ switch (hw->mac.type) { case e1000_ich8lan: if (phy->type != e1000_phy_igp_3) return ret_val; if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) || (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) { sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; break; } fallthrough; case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: return ret_val; } ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; data = er32(FEXTNVM); if (!(data & sw_cfg_mask)) goto release; /* Make sure HW does not configure LCD from PHY * extended configuration before SW configuration */ data = er32(EXTCNF_CTRL); if ((hw->mac.type < e1000_pch2lan) && (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) goto release; cnf_size = er32(EXTCNF_SIZE); cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; if (!cnf_size) goto release; cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; if (((hw->mac.type == e1000_pchlan) && !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || (hw->mac.type > e1000_pchlan)) { /* HW configures the SMBus address and LEDs when the * OEM and LCD Write Enable bits are set in the NVM. * When both NVM bits are cleared, SW will configure * them instead. */ ret_val = e1000_write_smbus_addr(hw); if (ret_val) goto release; data = er32(LEDCTL); ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, (u16)data); if (ret_val) goto release; } /* Configure LCD from extended configuration region. */ /* cnf_base_addr is in DWORD */ word_addr = (u16)(cnf_base_addr << 1); for (i = 0; i < cnf_size; i++) { ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data); if (ret_val) goto release; ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), 1, &reg_addr); if (ret_val) goto release; /* Save off the PHY page for future writes. */ if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { phy_page = reg_data; continue; } reg_addr &= PHY_REG_MASK; reg_addr |= phy_page; ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data); if (ret_val) goto release; } release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_k1_gig_workaround_hv - K1 Si workaround * @hw: pointer to the HW structure * @link: link up bool flag * * If K1 is enabled for 1Gbps, the MAC might stall when transitioning * from a lower speed. This workaround disables K1 whenever link is at 1Gig * If link is down, the function will restore the default K1 setting located * in the NVM. 
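 *
 * In short, the code below starts from the NVM default
 * (hw->dev_spec.ich8lan.nvm_k1_enabled) and forces K1 off only when
 * the PHY status register reports link up, autoneg/speed resolved and
 * 1000 Mbps; the PHY_REG(770, 19) write is the link stall fix (0x0100
 * with link, 0x4100 without).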
**/ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) { s32 ret_val = 0; u16 status_reg = 0; bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; if (hw->mac.type != e1000_pchlan) return 0; /* Wrap the whole flow with the sw flag */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ if (link) { if (hw->phy.type == e1000_phy_82578) { ret_val = e1e_rphy_locked(hw, BM_CS_STATUS, &status_reg); if (ret_val) goto release; status_reg &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_MASK); if (status_reg == (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_1000)) k1_enable = false; } if (hw->phy.type == e1000_phy_82577) { ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg); if (ret_val) goto release; status_reg &= (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE | HV_M_STATUS_SPEED_MASK); if (status_reg == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE | HV_M_STATUS_SPEED_1000)) k1_enable = false; } /* Link stall fix for link up */ ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100); if (ret_val) goto release; } else { /* Link stall fix for link down */ ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100); if (ret_val) goto release; } ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_configure_k1_ich8lan - Configure K1 power state * @hw: pointer to the HW structure * @k1_enable: K1 state to configure * * Configure the K1 power state based on the provided parameter. * Assumes semaphore already acquired. * * Success returns 0, Failure returns -E1000_ERR_PHY (-2) **/ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) { s32 ret_val; u32 ctrl_reg = 0; u32 ctrl_ext = 0; u32 reg = 0; u16 kmrn_reg = 0; ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, &kmrn_reg); if (ret_val) return ret_val; if (k1_enable) kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; else kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, kmrn_reg); if (ret_val) return ret_val; usleep_range(20, 40); ctrl_ext = er32(CTRL_EXT); ctrl_reg = er32(CTRL); reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); reg |= E1000_CTRL_FRCSPD; ew32(CTRL, reg); ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); e1e_flush(); usleep_range(20, 40); ew32(CTRL, ctrl_reg); ew32(CTRL_EXT, ctrl_ext); e1e_flush(); usleep_range(20, 40); return 0; } /** * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration * @hw: pointer to the HW structure * @d0_state: boolean if entering d0 or d3 device state * * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are * collectively called OEM bits. The OEM Write Enable bit and SW Config bit * in NVM determines whether HW should configure LPLU and Gbe Disable. 
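 *
 * Mapping applied below (MAC PHY_CTRL bits -> PHY HV_OEM_BITS):
 *
 *	D0 entry: GBE_DISABLE -> GBE_DIS, D0A_LPLU -> LPLU
 *	Dx entry: GBE_DISABLE or NOND0A_GBE_DISABLE -> GBE_DIS,
 *	          D0A_LPLU or NOND0A_LPLU           -> LPLU
 *
 * Auto-neg restart is requested only when the PHY reset is not blocked
 * (and, on e1000_pchlan parts, only when entering D0).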
**/ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) { s32 ret_val = 0; u32 mac_reg; u16 oem_reg; if (hw->mac.type < e1000_pchlan) return ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; if (hw->mac.type == e1000_pchlan) { mac_reg = er32(EXTCNF_CTRL); if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) goto release; } mac_reg = er32(FEXTNVM); if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) goto release; mac_reg = er32(PHY_CTRL); ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg); if (ret_val) goto release; oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); if (d0_state) { if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) oem_reg |= HV_OEM_BITS_GBE_DIS; if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) oem_reg |= HV_OEM_BITS_LPLU; } else { if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) oem_reg |= HV_OEM_BITS_GBE_DIS; if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_NOND0A_LPLU)) oem_reg |= HV_OEM_BITS_LPLU; } /* Set Restart auto-neg to activate the bits */ if ((d0_state || (hw->mac.type != e1000_pchlan)) && !hw->phy.ops.check_reset_block(hw)) oem_reg |= HV_OEM_BITS_RESTART_AN; ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode * @hw: pointer to the HW structure **/ static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) { s32 ret_val; u16 data; ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data); if (ret_val) return ret_val; data |= HV_KMRN_MDIO_SLOW; ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data); return ret_val; } /** * e1000_hv_phy_workarounds_ich8lan - apply PHY workarounds * @hw: pointer to the HW structure * * A series of PHY workarounds to be done after every PHY reset. **/ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) { s32 ret_val = 0; u16 phy_data; if (hw->mac.type != e1000_pchlan) return 0; /* Set MDIO slow mode before any other MDIO access */ if (hw->phy.type == e1000_phy_82577) { ret_val = e1000_set_mdio_slow_mode_hv(hw); if (ret_val) return ret_val; } if (((hw->phy.type == e1000_phy_82577) && ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { /* Disable generation of early preamble */ ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431); if (ret_val) return ret_val; /* Preamble tuning for SSC */ ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204); if (ret_val) return ret_val; } if (hw->phy.type == e1000_phy_82578) { /* Return registers to default by doing a soft reset then * writing 0x3140 to the control register. */ if (hw->phy.revision < 2) { e1000e_phy_sw_reset(hw); ret_val = e1e_wphy(hw, MII_BMCR, 0x3140); if (ret_val) return ret_val; } } /* Select page 0 */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; hw->phy.addr = 1; ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); hw->phy.ops.release(hw); if (ret_val) return ret_val; /* Configure the K1 Si workaround during phy reset assuming there is * link so that it disables K1 if link is in 1Gbps. 
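 * Passing link=true here does not claim that link really is up; it
 * simply makes e1000_k1_gig_workaround_hv() read the PHY status
 * registers and decide, rather than falling straight back to the NVM
 * default as it does for the link-down case.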
*/ ret_val = e1000_k1_gig_workaround_hv(hw, true); if (ret_val) return ret_val; /* Workaround for link disconnects on a busy hub in half duplex */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data); if (ret_val) goto release; ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF); if (ret_val) goto release; /* set MSE higher to enable link to stay up when noise is high */ ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY * @hw: pointer to the HW structure **/ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) { u32 mac_reg; u16 i, phy_reg = 0; s32 ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return; ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); if (ret_val) goto release; /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */ for (i = 0; i < (hw->mac.rar_entry_count); i++) { mac_reg = er32(RAL(i)); hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF)); hw->phy.ops.write_reg_page(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF)); mac_reg = er32(RAH(i)); hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF)); hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), (u16)((mac_reg & E1000_RAH_AV) >> 16)); } e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); release: hw->phy.ops.release(hw); } /** * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation * with 82579 PHY * @hw: pointer to the HW structure * @enable: flag to enable/disable workaround when enabling/disabling jumbos **/ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) { s32 ret_val = 0; u16 phy_reg, data; u32 mac_reg; u16 i; if (hw->mac.type < e1000_pch2lan) return 0; /* disable Rx path while enabling/disabling workaround */ e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | BIT(14)); if (ret_val) return ret_val; if (enable) { /* Write Rx addresses (rar_entry_count for RAL/H, and * SHRAL/H) and initial CRC values to the MAC */ for (i = 0; i < hw->mac.rar_entry_count; i++) { u8 mac_addr[ETH_ALEN] = { 0 }; u32 addr_high, addr_low; addr_high = er32(RAH(i)); if (!(addr_high & E1000_RAH_AV)) continue; addr_low = er32(RAL(i)); mac_addr[0] = (addr_low & 0xFF); mac_addr[1] = ((addr_low >> 8) & 0xFF); mac_addr[2] = ((addr_low >> 16) & 0xFF); mac_addr[3] = ((addr_low >> 24) & 0xFF); mac_addr[4] = (addr_high & 0xFF); mac_addr[5] = ((addr_high >> 8) & 0xFF); ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr)); } /* Write Rx addresses to the PHY */ e1000_copy_rx_addrs_to_phy_ich8lan(hw); /* Enable jumbo frame workaround in the MAC */ mac_reg = er32(FFLT_DBG); mac_reg &= ~BIT(14); mac_reg |= (7 << 15); ew32(FFLT_DBG, mac_reg); mac_reg = er32(RCTL); mac_reg |= E1000_RCTL_SECRC; ew32(RCTL, mac_reg); ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, &data); if (ret_val) return ret_val; ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, data | BIT(0)); if (ret_val) return ret_val; ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_HD_CTRL, &data); if (ret_val) return ret_val; data &= ~(0xF << 8); data |= (0xB << 8); ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_HD_CTRL, data); if (ret_val) return ret_val; /* Enable jumbo frame workaround in the PHY */ e1e_rphy(hw, PHY_REG(769, 23), &data); data &= ~(0x7F << 5); data |= (0x37 << 5); 
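		/* Same read-modify-write pattern as above: bits 11:5 of
		 * PHY_REG(769, 23) were cleared with ~(0x7F << 5) and the
		 * jumbo value 0x37 shifted back into that field before the
		 * register is written out below.
		 */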
ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); if (ret_val) return ret_val; e1e_rphy(hw, PHY_REG(769, 16), &data); data &= ~BIT(13); ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); if (ret_val) return ret_val; e1e_rphy(hw, PHY_REG(776, 20), &data); data &= ~(0x3FF << 2); data |= (E1000_TX_PTR_GAP << 2); ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); if (ret_val) return ret_val; ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100); if (ret_val) return ret_val; e1e_rphy(hw, HV_PM_CTRL, &data); ret_val = e1e_wphy(hw, HV_PM_CTRL, data | BIT(10)); if (ret_val) return ret_val; } else { /* Write MAC register values back to h/w defaults */ mac_reg = er32(FFLT_DBG); mac_reg &= ~(0xF << 14); ew32(FFLT_DBG, mac_reg); mac_reg = er32(RCTL); mac_reg &= ~E1000_RCTL_SECRC; ew32(RCTL, mac_reg); ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, &data); if (ret_val) return ret_val; ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, data & ~BIT(0)); if (ret_val) return ret_val; ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_HD_CTRL, &data); if (ret_val) return ret_val; data &= ~(0xF << 8); data |= (0xB << 8); ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_HD_CTRL, data); if (ret_val) return ret_val; /* Write PHY register values back to h/w defaults */ e1e_rphy(hw, PHY_REG(769, 23), &data); data &= ~(0x7F << 5); ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); if (ret_val) return ret_val; e1e_rphy(hw, PHY_REG(769, 16), &data); data |= BIT(13); ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); if (ret_val) return ret_val; e1e_rphy(hw, PHY_REG(776, 20), &data); data &= ~(0x3FF << 2); data |= (0x8 << 2); ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); if (ret_val) return ret_val; ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00); if (ret_val) return ret_val; e1e_rphy(hw, HV_PM_CTRL, &data); ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~BIT(10)); if (ret_val) return ret_val; } /* re-enable Rx path after enabling/disabling workaround */ return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~BIT(14)); } /** * e1000_lv_phy_workarounds_ich8lan - apply ich8 specific workarounds * @hw: pointer to the HW structure * * A series of PHY workarounds to be done after every PHY reset. 
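 * For 82579 this amounts to three steps (see the code below): switch
 * MDIO to slow mode first, raise the MSE threshold via EMI register
 * I82579_MSE_THRESHOLD (0x0034), then program I82579_MSE_LINK_DOWN
 * (0x0005) so link is dropped once the threshold has been hit five
 * times.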
**/ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) { s32 ret_val = 0; if (hw->mac.type != e1000_pch2lan) return 0; /* Set MDIO slow mode before any other MDIO access */ ret_val = e1000_set_mdio_slow_mode_hv(hw); if (ret_val) return ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* set MSE higher to enable link to stay up when noise is high */ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034); if (ret_val) goto release; /* drop link after 5 times MSE threshold was reached */ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_k1_workaround_lv - K1 Si workaround * @hw: pointer to the HW structure * * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps * Disable K1 in 1000Mbps and 100Mbps **/ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) { s32 ret_val = 0; u16 status_reg = 0; if (hw->mac.type != e1000_pch2lan) return 0; /* Set K1 beacon duration based on 10Mbs speed */ ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); if (ret_val) return ret_val; if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { if (status_reg & (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) { u16 pm_phy_reg; /* LV 1G/100 Packet drop issue wa */ ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg); if (ret_val) return ret_val; pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE; ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg); if (ret_val) return ret_val; } else { u32 mac_reg; mac_reg = er32(FEXTNVM4); mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; ew32(FEXTNVM4, mac_reg); } } return ret_val; } /** * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware * @hw: pointer to the HW structure * @gate: boolean set to true to gate, false to ungate * * Gate/ungate the automatic PHY configuration via hardware; perform * the configuration via software instead. **/ static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) { u32 extcnf_ctrl; if (hw->mac.type < e1000_pch2lan) return; extcnf_ctrl = er32(EXTCNF_CTRL); if (gate) extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; else extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; ew32(EXTCNF_CTRL, extcnf_ctrl); } /** * e1000_lan_init_done_ich8lan - Check for PHY config completion * @hw: pointer to the HW structure * * Check the appropriate indication the MAC has finished configuring the * PHY after a software reset. **/ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) { u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT; /* Wait for basic configuration completes before proceeding */ do { data = er32(STATUS); data &= E1000_STATUS_LAN_INIT_DONE; usleep_range(100, 200); } while ((!data) && --loop); /* If basic configuration is incomplete before the above loop * count reaches 0, loading the configuration from NVM will * leave the PHY in a bad state possibly resulting in no link. 
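 * The wait above is bounded by E1000_ICH8_LAN_INIT_TIMEOUT iterations
 * of usleep_range(100, 200), i.e. very roughly timeout * 100-200 us in
 * total, and only a debug message is emitted when it expires.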
*/ if (loop == 0) e_dbg("LAN_INIT_DONE not set, increase timeout\n"); /* Clear the Init Done bit for the next init event */ data = er32(STATUS); data &= ~E1000_STATUS_LAN_INIT_DONE; ew32(STATUS, data); } /** * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset * @hw: pointer to the HW structure **/ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) { s32 ret_val = 0; u16 reg; if (hw->phy.ops.check_reset_block(hw)) return 0; /* Allow time for h/w to get to quiescent state after reset */ usleep_range(10000, 11000); /* Perform any necessary post-reset workarounds */ switch (hw->mac.type) { case e1000_pchlan: ret_val = e1000_hv_phy_workarounds_ich8lan(hw); if (ret_val) return ret_val; break; case e1000_pch2lan: ret_val = e1000_lv_phy_workarounds_ich8lan(hw); if (ret_val) return ret_val; break; default: break; } /* Clear the host wakeup bit after lcd reset */ if (hw->mac.type >= e1000_pchlan) { e1e_rphy(hw, BM_PORT_GEN_CFG, &reg); reg &= ~BM_WUC_HOST_WU_BIT; e1e_wphy(hw, BM_PORT_GEN_CFG, reg); } /* Configure the LCD with the extended configuration region in NVM */ ret_val = e1000_sw_lcd_config_ich8lan(hw); if (ret_val) return ret_val; /* Configure the LCD with the OEM bits in NVM */ ret_val = e1000_oem_bits_config_ich8lan(hw, true); if (hw->mac.type == e1000_pch2lan) { /* Ungate automatic PHY configuration on non-managed 82579 */ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { usleep_range(10000, 11000); e1000_gate_hw_phy_config_ich8lan(hw, false); } /* Set EEE LPI Update Timer to 200usec */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_UPDATE_TIMER, 0x1387); hw->phy.ops.release(hw); } return ret_val; } /** * e1000_phy_hw_reset_ich8lan - Performs a PHY reset * @hw: pointer to the HW structure * * Resets the PHY * This is a function pointer entry point called by drivers * or other shared routines. **/ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) { s32 ret_val = 0; /* Gate automatic PHY configuration by hardware on non-managed 82579 */ if ((hw->mac.type == e1000_pch2lan) && !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) e1000_gate_hw_phy_config_ich8lan(hw, true); ret_val = e1000e_phy_hw_reset_generic(hw); if (ret_val) return ret_val; return e1000_post_phy_reset_ich8lan(hw); } /** * e1000_set_lplu_state_pchlan - Set Low Power Link Up state * @hw: pointer to the HW structure * @active: true to enable LPLU, false to disable * * Sets the LPLU state according to the active flag. For PCH, if OEM write * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set * the phy speed. This function will manually set the LPLU bit and restart * auto-neg as hw would do. D3 and D0 LPLU will call the same function * since it configures the same bit. **/ static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) { s32 ret_val; u16 oem_reg; ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg); if (ret_val) return ret_val; if (active) oem_reg |= HV_OEM_BITS_LPLU; else oem_reg &= ~HV_OEM_BITS_LPLU; if (!hw->phy.ops.check_reset_block(hw)) oem_reg |= HV_OEM_BITS_RESTART_AN; return e1e_wphy(hw, HV_OEM_BITS, oem_reg); } /** * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state * @hw: pointer to the HW structure * @active: true to enable LPLU, false to disable * * Sets the LPLU D0 state according to the active flag. When * activating LPLU this function also disables smart speed * and vice versa. 
LPLU will not be activated unless the * device autonegotiation advertisement meets standards of * either 10 or 10/100 or 10/100/1000 at all duplexes. * This is a function pointer entry point only called by * PHY setup routines. **/ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; u32 phy_ctrl; s32 ret_val = 0; u16 data; if (phy->type == e1000_phy_ife) return 0; phy_ctrl = er32(PHY_CTRL); if (active) { phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; ew32(PHY_CTRL, phy_ctrl); if (phy->type != e1000_phy_igp_3) return 0; /* Call gig speed drop workaround on LPLU before accessing * any PHY registers */ if (hw->mac.type == e1000_ich8lan) e1000e_gig_downshift_workaround_ich8lan(hw); /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } else { phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; ew32(PHY_CTRL, phy_ctrl); if (phy->type != e1000_phy_igp_3) return 0; /* LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. */ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } } return 0; } /** * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state * @hw: pointer to the HW structure * @active: true to enable LPLU, false to disable * * Sets the LPLU D3 state according to the active flag. When * activating LPLU this function also disables smart speed * and vice versa. LPLU will not be activated unless the * device autonegotiation advertisement meets standards of * either 10 or 10/100 or 10/100/1000 at all duplexes. * This is a function pointer entry point only called by * PHY setup routines. **/ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; u32 phy_ctrl; s32 ret_val = 0; u16 data; phy_ctrl = er32(PHY_CTRL); if (!active) { phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; ew32(PHY_CTRL, phy_ctrl); if (phy->type != e1000_phy_igp_3) return 0; /* LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. 
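 * Note that when LPLU is being enabled for D3 (the else-if branch
 * below), it is applied only if phy->autoneg_advertised is one of
 * E1000_ALL_SPEED_DUPLEX, E1000_ALL_NOT_GIG or E1000_ALL_10_SPEED,
 * which is how the "10 or 10/100 or 10/100/1000 at all duplexes"
 * requirement above is enforced.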
*/ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; ew32(PHY_CTRL, phy_ctrl); if (phy->type != e1000_phy_igp_3) return 0; /* Call gig speed drop workaround on LPLU before accessing * any PHY registers */ if (hw->mac.type == e1000_ich8lan) e1000e_gig_downshift_workaround_ich8lan(hw); /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); } return ret_val; } /** * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 * @hw: pointer to the HW structure * @bank: pointer to the variable that returns the active bank * * Reads signature byte from the NVM using the flash access registers. * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. **/ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) { u32 eecd; struct e1000_nvm_info *nvm = &hw->nvm; u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; u32 nvm_dword = 0; u8 sig_byte = 0; s32 ret_val; switch (hw->mac.type) { case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: bank1_offset = nvm->flash_bank_size; act_offset = E1000_ICH_NVM_SIG_WORD; /* set bank to 0 in case flash read fails */ *bank = 0; /* Check bank 0 */ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &nvm_dword); if (ret_val) return ret_val; sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == E1000_ICH_NVM_SIG_VALUE) { *bank = 0; return 0; } /* Check bank 1 */ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset + bank1_offset, &nvm_dword); if (ret_val) return ret_val; sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == E1000_ICH_NVM_SIG_VALUE) { *bank = 1; return 0; } e_dbg("ERROR: No valid NVM bank present\n"); return -E1000_ERR_NVM; case e1000_ich8lan: case e1000_ich9lan: eecd = er32(EECD); if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == E1000_EECD_SEC1VAL_VALID_MASK) { if (eecd & E1000_EECD_SEC1VAL) *bank = 1; else *bank = 0; return 0; } e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n"); fallthrough; default: /* set bank to 0 in case flash read fails */ *bank = 0; /* Check bank 0 */ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, &sig_byte); if (ret_val) return ret_val; if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == E1000_ICH_NVM_SIG_VALUE) { *bank = 0; return 0; } /* Check bank 1 */ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + bank1_offset, &sig_byte); if (ret_val) return ret_val; if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == E1000_ICH_NVM_SIG_VALUE) { *bank 
= 1; return 0; } e_dbg("ERROR: No valid NVM bank present\n"); return -E1000_ERR_NVM; } } /** * e1000_read_nvm_spt - NVM access for SPT * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the word(s) to read. * @words: Size of data to read in words. * @data: pointer to the word(s) to read at offset. * * Reads a word(s) from the NVM **/ static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 act_offset; s32 ret_val = 0; u32 bank = 0; u32 dword = 0; u16 offset_to_read; u16 i; if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || (words == 0)) { e_dbg("nvm parameter(s) out of bounds\n"); ret_val = -E1000_ERR_NVM; goto out; } nvm->ops.acquire(hw); ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); if (ret_val) { e_dbg("Could not detect valid bank, assuming bank 0\n"); bank = 0; } act_offset = (bank) ? nvm->flash_bank_size : 0; act_offset += offset; ret_val = 0; for (i = 0; i < words; i += 2) { if (words - i == 1) { if (dev_spec->shadow_ram[offset + i].modified) { data[i] = dev_spec->shadow_ram[offset + i].value; } else { offset_to_read = act_offset + i - ((act_offset + i) % 2); ret_val = e1000_read_flash_dword_ich8lan(hw, offset_to_read, &dword); if (ret_val) break; if ((act_offset + i) % 2 == 0) data[i] = (u16)(dword & 0xFFFF); else data[i] = (u16)((dword >> 16) & 0xFFFF); } } else { offset_to_read = act_offset + i; if (!(dev_spec->shadow_ram[offset + i].modified) || !(dev_spec->shadow_ram[offset + i + 1].modified)) { ret_val = e1000_read_flash_dword_ich8lan(hw, offset_to_read, &dword); if (ret_val) break; } if (dev_spec->shadow_ram[offset + i].modified) data[i] = dev_spec->shadow_ram[offset + i].value; else data[i] = (u16)(dword & 0xFFFF); if (dev_spec->shadow_ram[offset + i].modified) data[i + 1] = dev_spec->shadow_ram[offset + i + 1].value; else data[i + 1] = (u16)(dword >> 16 & 0xFFFF); } } nvm->ops.release(hw); out: if (ret_val) e_dbg("NVM read error: %d\n", ret_val); return ret_val; } /** * e1000_read_nvm_ich8lan - Read word(s) from the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the word(s) to read. * @words: Size of data to read in words * @data: Pointer to the word(s) to read at offset. * * Reads a word(s) from the NVM using the flash access registers. **/ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 act_offset; s32 ret_val = 0; u32 bank = 0; u16 i, word; if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || (words == 0)) { e_dbg("nvm parameter(s) out of bounds\n"); ret_val = -E1000_ERR_NVM; goto out; } nvm->ops.acquire(hw); ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); if (ret_val) { e_dbg("Could not detect valid bank, assuming bank 0\n"); bank = 0; } act_offset = (bank) ? 
nvm->flash_bank_size : 0; act_offset += offset; ret_val = 0; for (i = 0; i < words; i++) { if (dev_spec->shadow_ram[offset + i].modified) { data[i] = dev_spec->shadow_ram[offset + i].value; } else { ret_val = e1000_read_flash_word_ich8lan(hw, act_offset + i, &word); if (ret_val) break; data[i] = word; } } nvm->ops.release(hw); out: if (ret_val) e_dbg("NVM read error: %d\n", ret_val); return ret_val; } /** * e1000_flash_cycle_init_ich8lan - Initialize flash * @hw: pointer to the HW structure * * This function does initial flash setup so that a new read/write/erase cycle * can be started. **/ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) { union ich8_hws_flash_status hsfsts; s32 ret_val = -E1000_ERR_NVM; hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); /* Check if the flash descriptor is valid */ if (!hsfsts.hsf_status.fldesvalid) { e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n"); return -E1000_ERR_NVM; } /* Clear FCERR and DAEL in hw status by writing 1 */ hsfsts.hsf_status.flcerr = 1; hsfsts.hsf_status.dael = 1; if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); else ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); /* Either we should have a hardware SPI cycle in progress * bit to check against, in order to start a new cycle or * FDONE bit should be changed in the hardware so that it * is 1 after hardware reset, which can then be used as an * indication whether a cycle is in progress or has been * completed. */ if (!hsfsts.hsf_status.flcinprog) { /* There is no cycle running at present, * so we can start a cycle. * Begin by setting Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); else ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); ret_val = 0; } else { s32 i; /* Otherwise poll for sometime so the current * cycle has a chance to end before giving up. */ for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); if (!hsfsts.hsf_status.flcinprog) { ret_val = 0; break; } udelay(1); } if (!ret_val) { /* Successful in waiting for previous cycle to timeout, * now set the Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); else ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); } else { e_dbg("Flash controller busy, cannot get access\n"); } } return ret_val; } /** * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) * @hw: pointer to the HW structure * @timeout: maximum time to wait for completion * * This function starts a flash cycle and waits for its completion. 
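 *
 * Note on the register accesses below: from SPT onwards the flash
 * control bits are reached through the upper 16 bits of the 32-bit
 * HSFSTS mapping, which is why the >= e1000_pch_spt paths shift the
 * value by 16 on read and write instead of using the 16-bit HSFCTL
 * register. Completion is then polled with udelay(1) until FDONE is
 * set or the caller's timeout expires.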
**/ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) { union ich8_hws_flash_ctrl hsflctl; union ich8_hws_flash_status hsfsts; u32 i = 0; /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ if (hw->mac.type >= e1000_pch_spt) hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; else hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcgo = 1; if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); else ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); /* wait till FDONE bit is set to 1 */ do { hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); if (hsfsts.hsf_status.flcdone) break; udelay(1); } while (i++ < timeout); if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr) return 0; return -E1000_ERR_NVM; } /** * e1000_read_flash_dword_ich8lan - Read dword from flash * @hw: pointer to the HW structure * @offset: offset to data location * @data: pointer to the location for storing the data * * Reads the flash dword at offset into data. Offset is converted * to bytes before read. **/ static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset, u32 *data) { /* Must convert word offset into bytes. */ offset <<= 1; return e1000_read_flash_data32_ich8lan(hw, offset, data); } /** * e1000_read_flash_word_ich8lan - Read word from flash * @hw: pointer to the HW structure * @offset: offset to data location * @data: pointer to the location for storing the data * * Reads the flash word at offset into data. Offset is converted * to bytes before read. **/ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, u16 *data) { /* Must convert offset into bytes. */ offset <<= 1; return e1000_read_flash_data_ich8lan(hw, offset, 2, data); } /** * e1000_read_flash_byte_ich8lan - Read byte from flash * @hw: pointer to the HW structure * @offset: The offset of the byte to read. * @data: Pointer to a byte to store the value read. * * Reads a single byte from the NVM using the flash access registers. **/ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 *data) { s32 ret_val; u16 word = 0; /* In SPT, only 32 bits access is supported, * so this function should not be called. */ if (hw->mac.type >= e1000_pch_spt) return -E1000_ERR_NVM; else ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); if (ret_val) return ret_val; *data = (u8)word; return 0; } /** * e1000_read_flash_data_ich8lan - Read byte or word from NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the byte or word to read. * @size: Size of data to read, 1=byte 2=word * @data: Pointer to the word to store the value read. * * Reads a byte or word from the NVM using the flash access registers. **/ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, u8 size, u16 *data) { union ich8_hws_flash_status hsfsts; union ich8_hws_flash_ctrl hsflctl; u32 flash_linear_addr; u32 flash_data = 0; s32 ret_val = -E1000_ERR_NVM; u8 count = 0; if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + hw->nvm.flash_base_addr); do { udelay(1); /* Steps */ ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val) break; hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); /* 0b/1b corresponds to 1 or 2 byte size, respectively. 
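 * In other words fldbcount = size - 1, so a byte read programs 0 and a
 * word read programs 1.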
*/ hsflctl.hsf_ctrl.fldbcount = size - 1; hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); ew32flash(ICH_FLASH_FADDR, flash_linear_addr); ret_val = e1000_flash_cycle_ich8lan(hw, ICH_FLASH_READ_COMMAND_TIMEOUT); /* Check if FCERR is set to 1, if set to 1, clear it * and try the whole sequence a few more times, else * read in (shift in) the Flash Data0, the order is * least significant byte first msb to lsb */ if (!ret_val) { flash_data = er32flash(ICH_FLASH_FDATA0); if (size == 1) *data = (u8)(flash_data & 0x000000FF); else if (size == 2) *data = (u16)(flash_data & 0x0000FFFF); break; } else { /* If we've gotten here, then things are probably * completely hosed, but if the error condition is * detected, it won't hurt to give it another try... * ICH_FLASH_CYCLE_REPEAT_COUNT times. */ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); if (hsfsts.hsf_status.flcerr) { /* Repeat for some time before giving up. */ continue; } else if (!hsfsts.hsf_status.flcdone) { e_dbg("Timeout error - flash cycle did not complete.\n"); break; } } } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); return ret_val; } /** * e1000_read_flash_data32_ich8lan - Read dword from NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the dword to read. * @data: Pointer to the dword to store the value read. * * Reads a byte or word from the NVM using the flash access registers. **/ static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, u32 *data) { union ich8_hws_flash_status hsfsts; union ich8_hws_flash_ctrl hsflctl; u32 flash_linear_addr; s32 ret_val = -E1000_ERR_NVM; u8 count = 0; if (offset > ICH_FLASH_LINEAR_ADDR_MASK || hw->mac.type < e1000_pch_spt) return -E1000_ERR_NVM; flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + hw->nvm.flash_base_addr); do { udelay(1); /* Steps */ ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val) break; /* In SPT, This register is in Lan memory space, not flash. * Therefore, only 32 bit access is supported */ hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; /* In SPT, This register is in Lan memory space, not flash. * Therefore, only 32 bit access is supported */ ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16); ew32flash(ICH_FLASH_FADDR, flash_linear_addr); ret_val = e1000_flash_cycle_ich8lan(hw, ICH_FLASH_READ_COMMAND_TIMEOUT); /* Check if FCERR is set to 1, if set to 1, clear it * and try the whole sequence a few more times, else * read in (shift in) the Flash Data0, the order is * least significant byte first msb to lsb */ if (!ret_val) { *data = er32flash(ICH_FLASH_FDATA0); break; } else { /* If we've gotten here, then things are probably * completely hosed, but if the error condition is * detected, it won't hurt to give it another try... * ICH_FLASH_CYCLE_REPEAT_COUNT times. */ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); if (hsfsts.hsf_status.flcerr) { /* Repeat for some time before giving up. */ continue; } else if (!hsfsts.hsf_status.flcdone) { e_dbg("Timeout error - flash cycle did not complete.\n"); break; } } } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); return ret_val; } /** * e1000_write_nvm_ich8lan - Write word(s) to the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the word(s) to write. * @words: Size of data to write in words * @data: Pointer to the word(s) to write at offset. 
* * Writes a byte or word to the NVM using the flash access registers. **/ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u16 i; if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || (words == 0)) { e_dbg("nvm parameter(s) out of bounds\n"); return -E1000_ERR_NVM; } nvm->ops.acquire(hw); for (i = 0; i < words; i++) { dev_spec->shadow_ram[offset + i].modified = true; dev_spec->shadow_ram[offset + i].value = data[i]; } nvm->ops.release(hw); return 0; } /** * e1000_update_nvm_checksum_spt - Update the checksum for NVM * @hw: pointer to the HW structure * * The NVM checksum is updated by calling the generic update_nvm_checksum, * which writes the checksum to the shadow ram. The changes in the shadow * ram are then committed to the EEPROM by processing each bank at a time * checking for the modified bit and writing only the pending changes. * After a successful commit, the shadow ram is cleared and is ready for * future writes. **/ static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 i, act_offset, new_bank_offset, old_bank_offset, bank; s32 ret_val; u32 dword = 0; ret_val = e1000e_update_nvm_checksum_generic(hw); if (ret_val) goto out; if (nvm->type != e1000_nvm_flash_sw) goto out; nvm->ops.acquire(hw); /* We're writing to the opposite bank so if we're on bank 1, * write to bank 0 etc. We also need to erase the segment that * is going to be written */ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); if (ret_val) { e_dbg("Could not detect valid bank, assuming bank 0\n"); bank = 0; } if (bank == 0) { new_bank_offset = nvm->flash_bank_size; old_bank_offset = 0; ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); if (ret_val) goto release; } else { old_bank_offset = nvm->flash_bank_size; new_bank_offset = 0; ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); if (ret_val) goto release; } for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) { /* Determine whether to write the value stored * in the other NVM bank or a modified value stored * in the shadow RAM */ ret_val = e1000_read_flash_dword_ich8lan(hw, i + old_bank_offset, &dword); if (dev_spec->shadow_ram[i].modified) { dword &= 0xffff0000; dword |= (dev_spec->shadow_ram[i].value & 0xffff); } if (dev_spec->shadow_ram[i + 1].modified) { dword &= 0x0000ffff; dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff) << 16); } if (ret_val) break; /* If the word is 0x13, then make sure the signature bits * (15:14) are 11b until the commit has completed. * This will allow us to write 10b which indicates the * signature is valid. We want to do this after the write * has completed so that we don't mark the segment valid * while the write is still in progress */ if (i == E1000_ICH_NVM_SIG_WORD - 1) dword |= E1000_ICH_NVM_SIG_MASK << 16; /* Convert offset to bytes. */ act_offset = (i + new_bank_offset) << 1; usleep_range(100, 200); /* Write the data to the new bank. Offset in words */ act_offset = i + new_bank_offset; ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); if (ret_val) break; } /* Don't bother writing the segment valid bits if sector * programming failed. 
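 *
 * Commit strategy, in short: the copy loop above writes the whole
 * image into the opposite bank while the signature bits of word 0x13
 * still read 11b, and only after every dword is in place does the code
 * below flip the new bank to the valid 10b pattern and clear the old
 * bank's signature high byte, so the previously valid bank is not
 * disturbed until the new image is complete.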
*/ if (ret_val) { /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ e_dbg("Flash commit failed.\n"); goto release; } /* Finally validate the new segment by setting bit 15:14 * to 10b in word 0x13 , this can be done without an * erase as well since these bits are 11 to start with * and we need to change bit 14 to 0b */ act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; /*offset in words but we read dword */ --act_offset; ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); if (ret_val) goto release; dword &= 0xBFFFFFFF; ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); if (ret_val) goto release; /* offset in words but we read dword */ act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1; ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); if (ret_val) goto release; dword &= 0x00FFFFFF; ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); if (ret_val) goto release; /* Great! Everything worked, we can now clear the cached entries. */ for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { dev_spec->shadow_ram[i].modified = false; dev_spec->shadow_ram[i].value = 0xFFFF; } release: nvm->ops.release(hw); /* Reload the EEPROM, or else modifications will not appear * until after the next adapter reset. */ if (!ret_val) { nvm->ops.reload(hw); usleep_range(10000, 11000); } out: if (ret_val) e_dbg("NVM update error: %d\n", ret_val); return ret_val; } /** * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM * @hw: pointer to the HW structure * * The NVM checksum is updated by calling the generic update_nvm_checksum, * which writes the checksum to the shadow ram. The changes in the shadow * ram are then committed to the EEPROM by processing each bank at a time * checking for the modified bit and writing only the pending changes. * After a successful commit, the shadow ram is cleared and is ready for * future writes. **/ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 i, act_offset, new_bank_offset, old_bank_offset, bank; s32 ret_val; u16 data = 0; ret_val = e1000e_update_nvm_checksum_generic(hw); if (ret_val) goto out; if (nvm->type != e1000_nvm_flash_sw) goto out; nvm->ops.acquire(hw); /* We're writing to the opposite bank so if we're on bank 1, * write to bank 0 etc. We also need to erase the segment that * is going to be written */ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); if (ret_val) { e_dbg("Could not detect valid bank, assuming bank 0\n"); bank = 0; } if (bank == 0) { new_bank_offset = nvm->flash_bank_size; old_bank_offset = 0; ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); if (ret_val) goto release; } else { old_bank_offset = nvm->flash_bank_size; new_bank_offset = 0; ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); if (ret_val) goto release; } for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { if (dev_spec->shadow_ram[i].modified) { data = dev_spec->shadow_ram[i].value; } else { ret_val = e1000_read_flash_word_ich8lan(hw, i + old_bank_offset, &data); if (ret_val) break; } /* If the word is 0x13, then make sure the signature bits * (15:14) are 11b until the commit has completed. * This will allow us to write 10b which indicates the * signature is valid. 
We want to do this after the write * has completed so that we don't mark the segment valid * while the write is still in progress */ if (i == E1000_ICH_NVM_SIG_WORD) data |= E1000_ICH_NVM_SIG_MASK; /* Convert offset to bytes. */ act_offset = (i + new_bank_offset) << 1; usleep_range(100, 200); /* Write the bytes to the new bank. */ ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, (u8)data); if (ret_val) break; usleep_range(100, 200); ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset + 1, (u8)(data >> 8)); if (ret_val) break; } /* Don't bother writing the segment valid bits if sector * programming failed. */ if (ret_val) { /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ e_dbg("Flash commit failed.\n"); goto release; } /* Finally validate the new segment by setting bit 15:14 * to 10b in word 0x13 , this can be done without an * erase as well since these bits are 11 to start with * and we need to change bit 14 to 0b */ act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); if (ret_val) goto release; data &= 0xBFFF; ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1, (u8)(data >> 8)); if (ret_val) goto release; /* And invalidate the previously valid segment by setting * its signature word (0x13) high_byte to 0b. This can be * done without an erase because flash erase sets all bits * to 1's. We can write 1's to 0's without an erase */ act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); if (ret_val) goto release; /* Great! Everything worked, we can now clear the cached entries. */ for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { dev_spec->shadow_ram[i].modified = false; dev_spec->shadow_ram[i].value = 0xFFFF; } release: nvm->ops.release(hw); /* Reload the EEPROM, or else modifications will not appear * until after the next adapter reset. */ if (!ret_val) { nvm->ops.reload(hw); usleep_range(10000, 11000); } out: if (ret_val) e_dbg("NVM update error: %d\n", ret_val); return ret_val; } /** * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum * @hw: pointer to the HW structure * * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19. * If the bit is 0, that the EEPROM had been modified, but the checksum was not * calculated, in which case we need to calculate the checksum and set bit 6. **/ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) { s32 ret_val; u16 data; u16 word; u16 valid_csum_mask; /* Read NVM and check Invalid Image CSUM bit. If this bit is 0, * the checksum needs to be fixed. This bit is an indication that * the NVM was prepared by OEM software and did not calculate * the checksum...a likely scenario. 
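* Which word carries the valid-checksum bit depends on the MAC generation,
* hence the switch on hw->mac.type below.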
*/ switch (hw->mac.type) { case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: word = NVM_COMPAT; valid_csum_mask = NVM_COMPAT_VALID_CSUM; break; default: word = NVM_FUTURE_INIT_WORD1; valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM; break; } ret_val = e1000_read_nvm(hw, word, 1, &data); if (ret_val) return ret_val; if (!(data & valid_csum_mask)) { e_dbg("NVM Checksum valid bit not set\n"); if (hw->mac.type < e1000_pch_tgp) { data |= valid_csum_mask; ret_val = e1000_write_nvm(hw, word, 1, &data); if (ret_val) return ret_val; ret_val = e1000e_update_nvm_checksum(hw); if (ret_val) return ret_val; } } return e1000e_validate_nvm_checksum_generic(hw); } /** * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only * @hw: pointer to the HW structure * * To prevent malicious write/erase of the NVM, set it to be read-only * so that the hardware ignores all write/erase cycles of the NVM via * the flash control registers. The shadow-ram copy of the NVM will * still be updated, however any updates to this copy will not stick * across driver reloads. **/ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; union ich8_flash_protected_range pr0; union ich8_hws_flash_status hsfsts; u32 gfpreg; nvm->ops.acquire(hw); gfpreg = er32flash(ICH_FLASH_GFPREG); /* Write-protect GbE Sector of NVM */ pr0.regval = er32flash(ICH_FLASH_PR0); pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK; pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK); pr0.range.wpe = true; ew32flash(ICH_FLASH_PR0, pr0.regval); /* Lock down a subset of GbE Flash Control Registers, e.g. * PR0 to prevent the write-protection from being lifted. * Once FLOCKDN is set, the registers protected by it cannot * be written until FLOCKDN is cleared by a hardware reset. */ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); hsfsts.hsf_status.flockdn = true; ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); nvm->ops.release(hw); } /** * e1000_write_flash_data_ich8lan - Writes bytes to the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the byte/word to read. * @size: Size of data to read, 1=byte 2=word * @data: The byte(s) to write to the NVM. * * Writes one/two bytes to the NVM using the flash access registers. **/ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, u8 size, u16 data) { union ich8_hws_flash_status hsfsts; union ich8_hws_flash_ctrl hsflctl; u32 flash_linear_addr; u32 flash_data = 0; s32 ret_val; u8 count = 0; if (hw->mac.type >= e1000_pch_spt) { if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; } else { if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; } flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + hw->nvm.flash_base_addr); do { udelay(1); /* Steps */ ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val) break; /* In SPT, This register is in Lan memory space, not * flash. Therefore, only 32 bit access is supported */ if (hw->mac.type >= e1000_pch_spt) hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; else hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ hsflctl.hsf_ctrl.fldbcount = size - 1; hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; /* In SPT, This register is in Lan memory space, * not flash. 
Therefore, only 32 bit access is * supported */ if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); else ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); ew32flash(ICH_FLASH_FADDR, flash_linear_addr); if (size == 1) flash_data = (u32)data & 0x00FF; else flash_data = (u32)data; ew32flash(ICH_FLASH_FDATA0, flash_data); /* check if FCERR is set to 1 , if set to 1, clear it * and try the whole sequence a few more times else done */ ret_val = e1000_flash_cycle_ich8lan(hw, ICH_FLASH_WRITE_COMMAND_TIMEOUT); if (!ret_val) break; /* If we're here, then things are most likely * completely hosed, but if the error condition * is detected, it won't hurt to give it another * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. */ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); if (hsfsts.hsf_status.flcerr) /* Repeat for some time before giving up. */ continue; if (!hsfsts.hsf_status.flcdone) { e_dbg("Timeout error - flash cycle did not complete.\n"); break; } } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); return ret_val; } /** * e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the dwords to read. * @data: The 4 bytes to write to the NVM. * * Writes one/two/four bytes to the NVM using the flash access registers. **/ static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, u32 data) { union ich8_hws_flash_status hsfsts; union ich8_hws_flash_ctrl hsflctl; u32 flash_linear_addr; s32 ret_val; u8 count = 0; if (hw->mac.type >= e1000_pch_spt) { if (offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; } flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + hw->nvm.flash_base_addr); do { udelay(1); /* Steps */ ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val) break; /* In SPT, This register is in Lan memory space, not * flash. Therefore, only 32 bit access is supported */ if (hw->mac.type >= e1000_pch_spt) hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; else hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; /* In SPT, This register is in Lan memory space, * not flash. Therefore, only 32 bit access is * supported */ if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); else ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); ew32flash(ICH_FLASH_FADDR, flash_linear_addr); ew32flash(ICH_FLASH_FDATA0, data); /* check if FCERR is set to 1 , if set to 1, clear it * and try the whole sequence a few more times else done */ ret_val = e1000_flash_cycle_ich8lan(hw, ICH_FLASH_WRITE_COMMAND_TIMEOUT); if (!ret_val) break; /* If we're here, then things are most likely * completely hosed, but if the error condition * is detected, it won't hurt to give it another * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. */ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); if (hsfsts.hsf_status.flcerr) /* Repeat for some time before giving up. */ continue; if (!hsfsts.hsf_status.flcdone) { e_dbg("Timeout error - flash cycle did not complete.\n"); break; } } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); return ret_val; } /** * e1000_write_flash_byte_ich8lan - Write a single byte to NVM * @hw: pointer to the HW structure * @offset: The index of the byte to read. * @data: The byte to write to the NVM. * * Writes a single byte to the NVM using the flash access registers. 
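* Note that e1000_write_flash_data_ich8lan() rejects byte-sized accesses on SPT
* and newer MACs, so the SPT NVM update path uses the dword helpers instead.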
**/ static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 data) { u16 word = (u16)data; return e1000_write_flash_data_ich8lan(hw, offset, 1, word); } /** * e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM * @hw: pointer to the HW structure * @offset: The offset of the word to write. * @dword: The dword to write to the NVM. * * Writes a single dword to the NVM using the flash access registers. * Goes through a retry algorithm before giving up. **/ static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset, u32 dword) { s32 ret_val; u16 program_retries; /* Must convert word offset into bytes. */ offset <<= 1; ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); if (!ret_val) return ret_val; for (program_retries = 0; program_retries < 100; program_retries++) { e_dbg("Retrying Byte %8.8X at offset %u\n", dword, offset); usleep_range(100, 200); ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); if (!ret_val) break; } if (program_retries == 100) return -E1000_ERR_NVM; return 0; } /** * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM * @hw: pointer to the HW structure * @offset: The offset of the byte to write. * @byte: The byte to write to the NVM. * * Writes a single byte to the NVM using the flash access registers. * Goes through a retry algorithm before giving up. **/ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 byte) { s32 ret_val; u16 program_retries; ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); if (!ret_val) return ret_val; for (program_retries = 0; program_retries < 100; program_retries++) { e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); usleep_range(100, 200); ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); if (!ret_val) break; } if (program_retries == 100) return -E1000_ERR_NVM; return 0; } /** * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM * @hw: pointer to the HW structure * @bank: 0 for first bank, 1 for second bank, etc. * * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. * bank N is 4096 * N + flash_reg_addr. **/ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) { struct e1000_nvm_info *nvm = &hw->nvm; union ich8_hws_flash_status hsfsts; union ich8_hws_flash_ctrl hsflctl; u32 flash_linear_addr; /* bank size is in 16bit words - adjust to bytes */ u32 flash_bank_size = nvm->flash_bank_size * 2; s32 ret_val; s32 count = 0; s32 j, iteration, sector_size; hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); /* Determine HW Sector size: Read BERASE bits of hw flash status * register * 00: The Hw sector is 256 bytes, hence we need to erase 16 * consecutive sectors. The start index for the nth Hw sector * can be calculated as = bank * 4096 + n * 256 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. 
* The start index for the nth Hw sector can be calculated * as = bank * 4096 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 * (ich9 only, otherwise error condition) * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 */ switch (hsfsts.hsf_status.berasesz) { case 0: /* Hw sector size 256 */ sector_size = ICH_FLASH_SEG_SIZE_256; iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; break; case 1: sector_size = ICH_FLASH_SEG_SIZE_4K; iteration = 1; break; case 2: sector_size = ICH_FLASH_SEG_SIZE_8K; iteration = 1; break; case 3: sector_size = ICH_FLASH_SEG_SIZE_64K; iteration = 1; break; default: return -E1000_ERR_NVM; } /* Start with the base address, then add the sector offset. */ flash_linear_addr = hw->nvm.flash_base_addr; flash_linear_addr += (bank) ? flash_bank_size : 0; for (j = 0; j < iteration; j++) { do { u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT; /* Steps */ ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val) return ret_val; /* Write a value 11 (block Erase) in Flash * Cycle field in hw flash control */ if (hw->mac.type >= e1000_pch_spt) hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; else hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); else ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); /* Write the last 24 bits of an index within the * block into Flash Linear address field in Flash * Address. */ flash_linear_addr += (j * sector_size); ew32flash(ICH_FLASH_FADDR, flash_linear_addr); ret_val = e1000_flash_cycle_ich8lan(hw, timeout); if (!ret_val) break; /* Check if FCERR is set to 1. If 1, * clear it and try the whole sequence * a few more times else Done */ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); if (hsfsts.hsf_status.flcerr) /* repeat for some time before giving up */ continue; else if (!hsfsts.hsf_status.flcdone) return ret_val; } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); } return 0; } /** * e1000_valid_led_default_ich8lan - Set the default LED settings * @hw: pointer to the HW structure * @data: Pointer to the LED settings * * Reads the LED default settings from the NVM to data. If the NVM LED * settings is all 0's or F's, set the LED default to a valid LED default * setting. **/ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) { s32 ret_val; ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); if (ret_val) { e_dbg("NVM Read Error\n"); return ret_val; } if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) *data = ID_LED_DEFAULT_ICH8LAN; return 0; } /** * e1000_id_led_init_pchlan - store LED configurations * @hw: pointer to the HW structure * * PCH does not control LEDs via the LEDCTL register, rather it uses * the PHY LED configuration register. * * PCH also does not have an "always on" or "always off" mode which * complicates the ID feature. Instead of using the "on" mode to indicate * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()), * use "link_up" mode. The LEDs will still ID on request if there is no * link based on logic in e1000_led_[on|off]_pchlan(). 
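* Each 4-bit field of the NVM ID-LED word is translated below into the
* corresponding PHY LED field (spaced 5 bits apart) in ledctl_mode1/ledctl_mode2.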
**/ static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val; const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP; const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT; u16 data, i, temp, shift; /* Get default ID LED modes */ ret_val = hw->nvm.ops.valid_led_default(hw, &data); if (ret_val) return ret_val; mac->ledctl_default = er32(LEDCTL); mac->ledctl_mode1 = mac->ledctl_default; mac->ledctl_mode2 = mac->ledctl_default; for (i = 0; i < 4; i++) { temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK; shift = (i * 5); switch (temp) { case ID_LED_ON1_DEF2: case ID_LED_ON1_ON2: case ID_LED_ON1_OFF2: mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); mac->ledctl_mode1 |= (ledctl_on << shift); break; case ID_LED_OFF1_DEF2: case ID_LED_OFF1_ON2: case ID_LED_OFF1_OFF2: mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); mac->ledctl_mode1 |= (ledctl_off << shift); break; default: /* Do nothing */ break; } switch (temp) { case ID_LED_DEF1_ON2: case ID_LED_ON1_ON2: case ID_LED_OFF1_ON2: mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); mac->ledctl_mode2 |= (ledctl_on << shift); break; case ID_LED_DEF1_OFF2: case ID_LED_ON1_OFF2: case ID_LED_OFF1_OFF2: mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); mac->ledctl_mode2 |= (ledctl_off << shift); break; default: /* Do nothing */ break; } } return 0; } /** * e1000_get_bus_info_ich8lan - Get/Set the bus type and width * @hw: pointer to the HW structure * * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability * register, so the bus width is hard coded. **/ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) { struct e1000_bus_info *bus = &hw->bus; s32 ret_val; ret_val = e1000e_get_bus_info_pcie(hw); /* ICH devices are "PCI Express"-ish. They have * a configuration space, but do not contain * PCI Express Capability registers, so bus width * must be hardcoded. */ if (bus->width == e1000_bus_width_unknown) bus->width = e1000_bus_width_pcie_x1; return ret_val; } /** * e1000_reset_hw_ich8lan - Reset the hardware * @hw: pointer to the HW structure * * Does a full reset of the hardware which includes a reset of the PHY and * MAC. **/ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u16 kum_cfg; u32 ctrl, reg; s32 ret_val; /* Prevent the PCI-E bus from sticking if there is no TLP connection * on the last TLP read/write transaction when MAC is reset. */ ret_val = e1000e_disable_pcie_master(hw); if (ret_val) e_dbg("PCI-E Master disable polling has failed.\n"); e_dbg("Masking off all interrupts\n"); ew32(IMC, 0xffffffff); /* Disable the Transmit and Receive units. Then delay to allow * any pending transactions to complete before we hit the MAC * with the global reset. */ ew32(RCTL, 0); ew32(TCTL, E1000_TCTL_PSP); e1e_flush(); usleep_range(10000, 11000); /* Workaround for ICH8 bit corruption issue in FIFO memory */ if (hw->mac.type == e1000_ich8lan) { /* Set Tx and Rx buffer allocation to 8k apiece. */ ew32(PBA, E1000_PBA_8K); /* Set Packet Buffer Size to 16k. 
*/ ew32(PBS, E1000_PBS_16K); } if (hw->mac.type == e1000_pchlan) { /* Save the NVM K1 bit setting */ ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg); if (ret_val) return ret_val; if (kum_cfg & E1000_NVM_K1_ENABLE) dev_spec->nvm_k1_enabled = true; else dev_spec->nvm_k1_enabled = false; } ctrl = er32(CTRL); if (!hw->phy.ops.check_reset_block(hw)) { /* Full-chip reset requires MAC and PHY reset at the same * time to make sure the interface between MAC and the * external PHY is reset. */ ctrl |= E1000_CTRL_PHY_RST; /* Gate automatic PHY configuration by hardware on * non-managed 82579 */ if ((hw->mac.type == e1000_pch2lan) && !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) e1000_gate_hw_phy_config_ich8lan(hw, true); } ret_val = e1000_acquire_swflag_ich8lan(hw); e_dbg("Issuing a global reset to ich8lan\n"); ew32(CTRL, (ctrl | E1000_CTRL_RST)); /* cannot issue a flush here because it hangs the hardware */ msleep(20); /* Set Phy Config Counter to 50msec */ if (hw->mac.type == e1000_pch2lan) { reg = er32(FEXTNVM3); reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; ew32(FEXTNVM3, reg); } if (!ret_val) clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); if (ctrl & E1000_CTRL_PHY_RST) { ret_val = hw->phy.ops.get_cfg_done(hw); if (ret_val) return ret_val; ret_val = e1000_post_phy_reset_ich8lan(hw); if (ret_val) return ret_val; } /* For PCH, this write will make sure that any noise * will be detected as a CRC error and be dropped rather than show up * as a bad packet to the DMA engine. */ if (hw->mac.type == e1000_pchlan) ew32(CRC_OFFSET, 0x65656565); ew32(IMC, 0xffffffff); er32(ICR); reg = er32(KABGTXD); reg |= E1000_KABGTXD_BGSQLBIAS; ew32(KABGTXD, reg); return 0; } /** * e1000_init_hw_ich8lan - Initialize the hardware * @hw: pointer to the HW structure * * Prepares the hardware for transmit and receive by doing the following: * - initialize hardware bits * - initialize LED identification * - setup receive address registers * - setup flow control * - setup transmit descriptors * - clear statistics **/ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 ctrl_ext, txdctl, snoop, fflt_dbg; s32 ret_val; u16 i; e1000_initialize_hw_bits_ich8lan(hw); /* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); /* An error is not fatal and we should not stop init due to this */ if (ret_val) e_dbg("Error initializing identification LED\n"); /* Setup the receive address. */ e1000e_init_rx_addrs(hw, mac->rar_entry_count); /* Zero out the Multicast HASH table */ e_dbg("Zeroing the MTA\n"); for (i = 0; i < mac->mta_reg_count; i++) E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); /* The 82578 Rx buffer will stall if wakeup is enabled in host and * the ME. Disable wakeup by clearing the host wakeup bit. * Reset the phy after disabling host wakeup to reset the Rx buffer. 
*/ if (hw->phy.type == e1000_phy_82578) { e1e_rphy(hw, BM_PORT_GEN_CFG, &i); i &= ~BM_WUC_HOST_WU_BIT; e1e_wphy(hw, BM_PORT_GEN_CFG, i); ret_val = e1000_phy_hw_reset_ich8lan(hw); if (ret_val) return ret_val; } /* Setup link and flow control */ ret_val = mac->ops.setup_link(hw); /* Set the transmit descriptor write-back policy for both queues */ txdctl = er32(TXDCTL(0)); txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB); txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | E1000_TXDCTL_MAX_TX_DESC_PREFETCH); ew32(TXDCTL(0), txdctl); txdctl = er32(TXDCTL(1)); txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB); txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | E1000_TXDCTL_MAX_TX_DESC_PREFETCH); ew32(TXDCTL(1), txdctl); /* ICH8 has opposite polarity of no_snoop bits. * By default, we should use snoop behavior. */ if (mac->type == e1000_ich8lan) snoop = PCIE_ICH8_SNOOP_ALL; else snoop = (u32)~(PCIE_NO_SNOOP_ALL); e1000e_set_pcie_no_snoop(hw, snoop); /* Enable workaround for packet loss issue on TGP PCH * Do not gate DMA clock from the modPHY block */ if (mac->type >= e1000_pch_tgp) { fflt_dbg = er32(FFLT_DBG); fflt_dbg |= E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK; ew32(FFLT_DBG, fflt_dbg); } ctrl_ext = er32(CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_RO_DIS; ew32(CTRL_EXT, ctrl_ext); /* Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there * is no link. */ e1000_clear_hw_cntrs_ich8lan(hw); return ret_val; } /** * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits * @hw: pointer to the HW structure * * Sets/Clears required hardware bits necessary for correctly setting up the * hardware for transmit and receive. **/ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) { u32 reg; /* Extended Device Control */ reg = er32(CTRL_EXT); reg |= BIT(22); /* Enable PHY low-power state when MAC is at D3 w/o WoL */ if (hw->mac.type >= e1000_pchlan) reg |= E1000_CTRL_EXT_PHYPDEN; ew32(CTRL_EXT, reg); /* Transmit Descriptor Control 0 */ reg = er32(TXDCTL(0)); reg |= BIT(22); ew32(TXDCTL(0), reg); /* Transmit Descriptor Control 1 */ reg = er32(TXDCTL(1)); reg |= BIT(22); ew32(TXDCTL(1), reg); /* Transmit Arbitration Control 0 */ reg = er32(TARC(0)); if (hw->mac.type == e1000_ich8lan) reg |= BIT(28) | BIT(29); reg |= BIT(23) | BIT(24) | BIT(26) | BIT(27); ew32(TARC(0), reg); /* Transmit Arbitration Control 1 */ reg = er32(TARC(1)); if (er32(TCTL) & E1000_TCTL_MULR) reg &= ~BIT(28); else reg |= BIT(28); reg |= BIT(24) | BIT(26) | BIT(30); ew32(TARC(1), reg); /* Device Status */ if (hw->mac.type == e1000_ich8lan) { reg = er32(STATUS); reg &= ~BIT(31); ew32(STATUS, reg); } /* work-around descriptor data corruption issue during nfs v2 udp * traffic, just disable the nfs filtering capability */ reg = er32(RFCTL); reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); /* Disable IPv6 extension header parsing because some malformed * IPv6 headers can hang the Rx. 
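* (Only applied on ICH8, as gated below.)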
*/ if (hw->mac.type == e1000_ich8lan) reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); ew32(RFCTL, reg); /* Enable ECC on Lynxpoint */ if (hw->mac.type >= e1000_pch_lpt) { reg = er32(PBECCSTS); reg |= E1000_PBECCSTS_ECC_ENABLE; ew32(PBECCSTS, reg); reg = er32(CTRL); reg |= E1000_CTRL_MEHE; ew32(CTRL, reg); } } /** * e1000_setup_link_ich8lan - Setup flow control and link settings * @hw: pointer to the HW structure * * Determines which flow control settings to use, then configures flow * control. Calls the appropriate media-specific link configuration * function. Assuming the adapter has a valid link partner, a valid link * should be established. Assumes the hardware has previously been reset * and the transmitter and receiver are not enabled. **/ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) { s32 ret_val; if (hw->phy.ops.check_reset_block(hw)) return 0; /* ICH parts do not have a word in the NVM to determine * the default flow control setting, so we explicitly * set it to full. */ if (hw->fc.requested_mode == e1000_fc_default) { /* Workaround h/w hang when Tx flow control enabled */ if (hw->mac.type == e1000_pchlan) hw->fc.requested_mode = e1000_fc_rx_pause; else hw->fc.requested_mode = e1000_fc_full; } /* Save off the requested flow control mode for use later. Depending * on the link partner's capabilities, we may or may not use this mode. */ hw->fc.current_mode = hw->fc.requested_mode; e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); /* Continue to configure the copper link. */ ret_val = hw->mac.ops.setup_physical_interface(hw); if (ret_val) return ret_val; ew32(FCTTV, hw->fc.pause_time); if ((hw->phy.type == e1000_phy_82578) || (hw->phy.type == e1000_phy_82579) || (hw->phy.type == e1000_phy_i217) || (hw->phy.type == e1000_phy_82577)) { ew32(FCRTV_PCH, hw->fc.refresh_time); ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27), hw->fc.pause_time); if (ret_val) return ret_val; } return e1000e_set_fc_watermarks(hw); } /** * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface * @hw: pointer to the HW structure * * Configures the kumeran interface to the PHY to wait the appropriate time * when polling the PHY, then call the generic setup_copper_link to finish * configuring the copper link. **/ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; u16 reg_data; ctrl = er32(CTRL); ctrl |= E1000_CTRL_SLU; ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); ew32(CTRL, ctrl); /* Set the mac to wait the maximum time between each iteration * and increase the max iterations when polling the phy; * this fixes erroneous timeouts at 10Mbps. 
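* (The Kumeran TIMEOUTS register is set to 0xFFFF and the low bits of the
* inband parameter are set to 0x3F below.)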
*/ ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF); if (ret_val) return ret_val; ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, &reg_data); if (ret_val) return ret_val; reg_data |= 0x3F; ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, reg_data); if (ret_val) return ret_val; switch (hw->phy.type) { case e1000_phy_igp_3: ret_val = e1000e_copper_link_setup_igp(hw); if (ret_val) return ret_val; break; case e1000_phy_bm: case e1000_phy_82578: ret_val = e1000e_copper_link_setup_m88(hw); if (ret_val) return ret_val; break; case e1000_phy_82577: case e1000_phy_82579: ret_val = e1000_copper_link_setup_82577(hw); if (ret_val) return ret_val; break; case e1000_phy_ife: ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data); if (ret_val) return ret_val; reg_data &= ~IFE_PMC_AUTO_MDIX; switch (hw->phy.mdix) { case 1: reg_data &= ~IFE_PMC_FORCE_MDIX; break; case 2: reg_data |= IFE_PMC_FORCE_MDIX; break; case 0: default: reg_data |= IFE_PMC_AUTO_MDIX; break; } ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data); if (ret_val) return ret_val; break; default: break; } return e1000e_setup_copper_link(hw); } /** * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface * @hw: pointer to the HW structure * * Calls the PHY specific link setup function and then calls the * generic setup_copper_link to finish configuring the link for * Lynxpoint PCH devices **/ static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; ctrl = er32(CTRL); ctrl |= E1000_CTRL_SLU; ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); ew32(CTRL, ctrl); ret_val = e1000_copper_link_setup_82577(hw); if (ret_val) return ret_val; return e1000e_setup_copper_link(hw); } /** * e1000_get_link_up_info_ich8lan - Get current link speed and duplex * @hw: pointer to the HW structure * @speed: pointer to store current link speed * @duplex: pointer to store the current link duplex * * Calls the generic get_speed_and_duplex to retrieve the current link * information and then calls the Kumeran lock loss workaround for links at * gigabit speeds. **/ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, u16 *duplex) { s32 ret_val; ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); if (ret_val) return ret_val; if ((hw->mac.type == e1000_ich8lan) && (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) { ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); } return ret_val; } /** * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround * @hw: pointer to the HW structure * * Work-around for 82566 Kumeran PCS lock loss: * On link status change (i.e. PCI reset, speed change) and link is up and * speed is gigabit- * 0) if workaround is optionally disabled do nothing * 1) wait 1ms for Kumeran link to come up * 2) check Kumeran Diagnostic register PCS lock loss bit * 3) if not set the link is locked (all is good), otherwise... * 4) reset the PHY * 5) repeat up to 10 times * Note: this is only called for IGP3 copper when speed is 1gb. **/ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 phy_ctrl; s32 ret_val; u16 i, data; bool link; if (!dev_spec->kmrn_lock_loss_workaround_enabled) return 0; /* Make sure link is up before proceeding. If not just return. 
* Attempting this while link is negotiating fouled up link * stability */ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (!link) return 0; for (i = 0; i < 10; i++) { /* read once to clear */ ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); if (ret_val) return ret_val; /* and again to get new status */ ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); if (ret_val) return ret_val; /* check for PCS lock */ if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) return 0; /* Issue PHY reset */ e1000_phy_hw_reset(hw); mdelay(5); } /* Disable GigE link negotiation */ phy_ctrl = er32(PHY_CTRL); phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); ew32(PHY_CTRL, phy_ctrl); /* Call gig speed drop workaround on Gig disable before accessing * any PHY registers */ e1000e_gig_downshift_workaround_ich8lan(hw); /* unable to acquire PCS lock */ return -E1000_ERR_PHY; } /** * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state * @hw: pointer to the HW structure * @state: boolean value used to set the current Kumeran workaround state * * If ICH8, set the current Kumeran workaround state (enabled - true * /disabled - false). **/ void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, bool state) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; if (hw->mac.type != e1000_ich8lan) { e_dbg("Workaround applies to ICH8 only.\n"); return; } dev_spec->kmrn_lock_loss_workaround_enabled = state; } /** * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 * @hw: pointer to the HW structure * * Workaround for 82566 power-down on D3 entry: * 1) disable gigabit link * 2) write VR power-down enable * 3) read it back * Continue if successful, else issue LCD reset and repeat **/ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) { u32 reg; u16 data; u8 retry = 0; if (hw->phy.type != e1000_phy_igp_3) return; /* Try the workaround twice (if needed) */ do { /* Disable link */ reg = er32(PHY_CTRL); reg |= (E1000_PHY_CTRL_GBE_DISABLE | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); ew32(PHY_CTRL, reg); /* Call gig speed drop workaround on Gig disable before * accessing any PHY registers */ if (hw->mac.type == e1000_ich8lan) e1000e_gig_downshift_workaround_ich8lan(hw); /* Write VR power-down enable */ e1e_rphy(hw, IGP3_VR_CTRL, &data); data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN); /* Read it back and test */ e1e_rphy(hw, IGP3_VR_CTRL, &data); data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry) break; /* Issue PHY reset and repeat at most one more time */ reg = er32(CTRL); ew32(CTRL, reg | E1000_CTRL_PHY_RST); retry++; } while (retry); } /** * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working * @hw: pointer to the HW structure * * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC), * LPLU, Gig disable, MDIC PHY reset): * 1) Set Kumeran Near-end loopback * 2) Clear Kumeran Near-end loopback * Should only be called for ICH8[m] devices with any 1G Phy. 
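* The function returns early for non-ICH8 MACs and for the IFE (10/100) PHY.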
**/ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) { s32 ret_val; u16 reg_data; if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife)) return; ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, &reg_data); if (ret_val) return; reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data); if (ret_val) return; reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data); } /** * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx * @hw: pointer to the HW structure * * During S0 to Sx transition, it is possible the link remains at gig * instead of negotiating to a lower speed. Before going to Sx, set * 'Gig Disable' to force link speed negotiation to a lower speed based on * the LPLU setting in the NVM or custom setting. For PCH and newer parts, * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also * needs to be written. * Parts that support (and are linked to a partner which support) EEE in * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power * than 10Mbps w/o EEE. **/ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 phy_ctrl; s32 ret_val; phy_ctrl = er32(PHY_CTRL); phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; if (hw->phy.type == e1000_phy_i217) { u16 phy_reg, device_id = hw->adapter->pdev->device; if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || (device_id == E1000_DEV_ID_PCH_I218_LM3) || (device_id == E1000_DEV_ID_PCH_I218_V3) || (hw->mac.type >= e1000_pch_spt)) { u32 fextnvm6 = er32(FEXTNVM6); ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); } ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; if (!dev_spec->eee_disable) { u16 eee_advert; ret_val = e1000_read_emi_reg_locked(hw, I217_EEE_ADVERTISEMENT, &eee_advert); if (ret_val) goto release; /* Disable LPLU if both link partners support 100BaseT * EEE and 100Full is advertised on both ends of the * link, and enable Auto Enable LPI since there will * be no driver to enable LPI while in Sx. */ if ((eee_advert & I82579_EEE_100_SUPPORTED) && (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) && (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) { phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_NOND0A_LPLU); /* Set Auto Enable LPI after link up */ e1e_rphy_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg); phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI; e1e_wphy_locked(hw, I217_LPI_GPIO_CTRL, phy_reg); } } /* For i217 Intel Rapid Start Technology support, * when the system is going into Sx and no manageability engine * is present, the driver must configure proxy to reset only on * power good. LPI (Low Power Idle) state must also reset only * on power good, as well as the MTA (Multicast table array). * The SMBus release must also be disabled on LCD reset. */ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { /* Enable proxy to reset only on power good. */ e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg); phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE; e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg); /* Set bit enable LPI (EEE) to reset only on * power good. */ e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg); phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET; e1e_wphy_locked(hw, I217_SxCTRL, phy_reg); /* Disable the SMB release on LCD reset. 
*/ e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE; e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); } /* Enable MTA to reset for Intel Rapid Start Technology * Support */ e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); phy_reg |= I217_CGFREG_ENABLE_MTA_RESET; e1e_wphy_locked(hw, I217_CGFREG, phy_reg); release: hw->phy.ops.release(hw); } out: ew32(PHY_CTRL, phy_ctrl); if (hw->mac.type == e1000_ich8lan) e1000e_gig_downshift_workaround_ich8lan(hw); if (hw->mac.type >= e1000_pchlan) { e1000_oem_bits_config_ich8lan(hw, false); /* Reset PHY to activate OEM bits on 82577/8 */ if (hw->mac.type == e1000_pchlan) e1000e_phy_hw_reset_generic(hw); ret_val = hw->phy.ops.acquire(hw); if (ret_val) return; e1000_write_smbus_addr(hw); hw->phy.ops.release(hw); } } /** * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0 * @hw: pointer to the HW structure * * During Sx to S0 transitions on non-managed devices or managed devices * on which PHY resets are not blocked, if the PHY registers cannot be * accessed properly by the s/w toggle the LANPHYPC value to power cycle * the PHY. * On i217, setup Intel Rapid Start Technology. **/ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) { s32 ret_val; if (hw->mac.type < e1000_pch2lan) return; ret_val = e1000_init_phy_workarounds_pchlan(hw); if (ret_val) { e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val); return; } /* For i217 Intel Rapid Start Technology support when the system * is transitioning from Sx and no manageability engine is present * configure SMBus to restore on reset, disable proxy, and enable * the reset on MTA (Multicast table array). */ if (hw->phy.type == e1000_phy_i217) { u16 phy_reg; ret_val = hw->phy.ops.acquire(hw); if (ret_val) { e_dbg("Failed to setup iRST\n"); return; } /* Clear Auto Enable LPI after link up */ e1e_rphy_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg); phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI; e1e_wphy_locked(hw, I217_LPI_GPIO_CTRL, phy_reg); if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { /* Restore clear on SMB if no manageability engine * is present */ ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); if (ret_val) goto release; phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE; e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); /* Disable Proxy */ e1e_wphy_locked(hw, I217_PROXY_CTRL, 0); } /* Enable reset on MTA */ ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); if (ret_val) goto release; phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET; e1e_wphy_locked(hw, I217_CGFREG, phy_reg); release: if (ret_val) e_dbg("Error %d in resume workarounds\n", ret_val); hw->phy.ops.release(hw); } } /** * e1000_cleanup_led_ich8lan - Restore the default LED operation * @hw: pointer to the HW structure * * Return the LED back to the default configuration. **/ static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) { if (hw->phy.type == e1000_phy_ife) return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); ew32(LEDCTL, hw->mac.ledctl_default); return 0; } /** * e1000_led_on_ich8lan - Turn LEDs on * @hw: pointer to the HW structure * * Turn on the LEDs. **/ static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) { if (hw->phy.type == e1000_phy_ife) return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); ew32(LEDCTL, hw->mac.ledctl_mode2); return 0; } /** * e1000_led_off_ich8lan - Turn LEDs off * @hw: pointer to the HW structure * * Turn off the LEDs. 
**/ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) { if (hw->phy.type == e1000_phy_ife) return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); ew32(LEDCTL, hw->mac.ledctl_mode1); return 0; } /** * e1000_setup_led_pchlan - Configures SW controllable LED * @hw: pointer to the HW structure * * This prepares the SW controllable LED for use. **/ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) { return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1); } /** * e1000_cleanup_led_pchlan - Restore the default LED operation * @hw: pointer to the HW structure * * Return the LED back to the default configuration. **/ static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) { return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default); } /** * e1000_led_on_pchlan - Turn LEDs on * @hw: pointer to the HW structure * * Turn on the LEDs. **/ static s32 e1000_led_on_pchlan(struct e1000_hw *hw) { u16 data = (u16)hw->mac.ledctl_mode2; u32 i, led; /* If no link, then turn LED on by setting the invert bit * for each LED that's mode is "link_up" in ledctl_mode2. */ if (!(er32(STATUS) & E1000_STATUS_LU)) { for (i = 0; i < 3; i++) { led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; if ((led & E1000_PHY_LED0_MODE_MASK) != E1000_LEDCTL_MODE_LINK_UP) continue; if (led & E1000_PHY_LED0_IVRT) data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); else data |= (E1000_PHY_LED0_IVRT << (i * 5)); } } return e1e_wphy(hw, HV_LED_CONFIG, data); } /** * e1000_led_off_pchlan - Turn LEDs off * @hw: pointer to the HW structure * * Turn off the LEDs. **/ static s32 e1000_led_off_pchlan(struct e1000_hw *hw) { u16 data = (u16)hw->mac.ledctl_mode1; u32 i, led; /* If no link, then turn LED off by clearing the invert bit * for each LED that's mode is "link_up" in ledctl_mode1. */ if (!(er32(STATUS) & E1000_STATUS_LU)) { for (i = 0; i < 3; i++) { led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; if ((led & E1000_PHY_LED0_MODE_MASK) != E1000_LEDCTL_MODE_LINK_UP) continue; if (led & E1000_PHY_LED0_IVRT) data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); else data |= (E1000_PHY_LED0_IVRT << (i * 5)); } } return e1e_wphy(hw, HV_LED_CONFIG, data); } /** * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset * @hw: pointer to the HW structure * * Read appropriate register for the config done bit for completion status * and configure the PHY through s/w for EEPROM-less parts. * * NOTE: some silicon which is EEPROM-less will fail trying to read the * config done bit, so only an error is logged and continues. If we were * to return with error, EEPROM-less silicon would not be able to be reset * or change link. **/ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) { s32 ret_val = 0; u32 bank = 0; u32 status; e1000e_get_cfg_done_generic(hw); /* Wait for indication from h/w that it has completed basic config */ if (hw->mac.type >= e1000_ich10lan) { e1000_lan_init_done_ich8lan(hw); } else { ret_val = e1000e_get_auto_rd_done(hw); if (ret_val) { /* When auto config read does not complete, do not * return with an error. This can happen in situations * where there is no eeprom and prevents getting link. 
*/ e_dbg("Auto Read Done did not complete\n"); ret_val = 0; } } /* Clear PHY Reset Asserted bit */ status = er32(STATUS); if (status & E1000_STATUS_PHYRA) ew32(STATUS, status & ~E1000_STATUS_PHYRA); else e_dbg("PHY Reset Asserted not set - needs delay\n"); /* If EEPROM is not marked present, init the IGP 3 PHY manually */ if (hw->mac.type <= e1000_ich9lan) { if (!(er32(EECD) & E1000_EECD_PRES) && (hw->phy.type == e1000_phy_igp_3)) { e1000e_phy_init_script_igp3(hw); } } else { if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { /* Maybe we should do a basic PHY config */ e_dbg("EEPROM not present\n"); ret_val = -E1000_ERR_CONFIG; } } return ret_val; } /** * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down * @hw: pointer to the HW structure * * In the case of a PHY power down to save power, or to turn off link during a * driver unload, or wake on lan is not enabled, remove the link. **/ static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) { /* If the management interface is not enabled, then power down */ if (!(hw->mac.ops.check_mng_mode(hw) || hw->phy.ops.check_reset_block(hw))) e1000_power_down_phy_copper(hw); } /** * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters * @hw: pointer to the HW structure * * Clears hardware counters specific to the silicon family and calls * clear_hw_cntrs_generic to clear all general purpose counters. **/ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) { u16 phy_data; s32 ret_val; e1000e_clear_hw_cntrs_base(hw); er32(ALGNERRC); er32(RXERRC); er32(TNCRS); er32(CEXTERR); er32(TSCTC); er32(TSCTFC); er32(MGTPRC); er32(MGTPDC); er32(MGTPTC); er32(IAC); er32(ICRXOC); /* Clear PHY statistics registers */ if ((hw->phy.type == e1000_phy_82578) || (hw->phy.type == e1000_phy_82579) || (hw->phy.type == e1000_phy_i217) || (hw->phy.type == e1000_phy_82577)) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) return; ret_val = hw->phy.ops.set_page(hw, HV_STATS_PAGE << IGP_PAGE_SHIFT); if (ret_val) goto release; hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); release: hw->phy.ops.release(hw); } } static const struct e1000_mac_operations ich8_mac_ops = { /* check_mng_mode dependent on mac type */ .check_for_link = e1000_check_for_copper_link_ich8lan, /* cleanup_led dependent on mac type */ .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, .get_bus_info = e1000_get_bus_info_ich8lan, .set_lan_id = e1000_set_lan_id_single_port, .get_link_up_info = e1000_get_link_up_info_ich8lan, /* led_on dependent on mac type */ /* led_off dependent on mac type */ .update_mc_addr_list = e1000e_update_mc_addr_list_generic, .reset_hw = e1000_reset_hw_ich8lan, .init_hw = e1000_init_hw_ich8lan, .setup_link = e1000_setup_link_ich8lan, .setup_physical_interface = e1000_setup_copper_link_ich8lan, /* 
id_led_init dependent on mac type */ .config_collision_dist = e1000e_config_collision_dist_generic, .rar_set = e1000e_rar_set_generic, .rar_get_count = e1000e_rar_get_count_generic, }; static const struct e1000_phy_operations ich8_phy_ops = { .acquire = e1000_acquire_swflag_ich8lan, .check_reset_block = e1000_check_reset_block_ich8lan, .commit = NULL, .get_cfg_done = e1000_get_cfg_done_ich8lan, .get_cable_length = e1000e_get_cable_length_igp_2, .read_reg = e1000e_read_phy_reg_igp, .release = e1000_release_swflag_ich8lan, .reset = e1000_phy_hw_reset_ich8lan, .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, .write_reg = e1000e_write_phy_reg_igp, }; static const struct e1000_nvm_operations ich8_nvm_ops = { .acquire = e1000_acquire_nvm_ich8lan, .read = e1000_read_nvm_ich8lan, .release = e1000_release_nvm_ich8lan, .reload = e1000e_reload_nvm_generic, .update = e1000_update_nvm_checksum_ich8lan, .valid_led_default = e1000_valid_led_default_ich8lan, .validate = e1000_validate_nvm_checksum_ich8lan, .write = e1000_write_nvm_ich8lan, }; static const struct e1000_nvm_operations spt_nvm_ops = { .acquire = e1000_acquire_nvm_ich8lan, .release = e1000_release_nvm_ich8lan, .read = e1000_read_nvm_spt, .update = e1000_update_nvm_checksum_spt, .reload = e1000e_reload_nvm_generic, .valid_led_default = e1000_valid_led_default_ich8lan, .validate = e1000_validate_nvm_checksum_ich8lan, .write = e1000_write_nvm_ich8lan, }; const struct e1000_info e1000_ich8_info = { .mac = e1000_ich8lan, .flags = FLAG_HAS_WOL | FLAG_IS_ICH | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH | FLAG_APME_IN_WUC, .pba = 8, .max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; const struct e1000_info e1000_ich9_info = { .mac = e1000_ich9lan, .flags = FLAG_HAS_JUMBO_FRAMES | FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH | FLAG_APME_IN_WUC, .pba = 18, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; const struct e1000_info e1000_ich10_info = { .mac = e1000_ich10lan, .flags = FLAG_HAS_JUMBO_FRAMES | FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH | FLAG_APME_IN_WUC, .pba = 18, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; const struct e1000_info e1000_pch_info = { .mac = e1000_pchlan, .flags = FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH | FLAG_HAS_JUMBO_FRAMES | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ | FLAG_APME_IN_WUC, .flags2 = FLAG2_HAS_PHY_STATS, .pba = 26, .max_hw_frame_size = 4096, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; const struct e1000_info e1000_pch2_info = { .mac = e1000_pch2lan, .flags = FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_HAS_HW_TIMESTAMP | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH | FLAG_HAS_JUMBO_FRAMES | FLAG_APME_IN_WUC, .flags2 = FLAG2_HAS_PHY_STATS | FLAG2_HAS_EEE | FLAG2_CHECK_SYSTIM_OVERFLOW, .pba = 26, .max_hw_frame_size = 9022, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; const struct e1000_info 
e1000_pch_lpt_info = { .mac = e1000_pch_lpt, .flags = FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_HAS_HW_TIMESTAMP | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH | FLAG_HAS_JUMBO_FRAMES | FLAG_APME_IN_WUC, .flags2 = FLAG2_HAS_PHY_STATS | FLAG2_HAS_EEE | FLAG2_CHECK_SYSTIM_OVERFLOW, .pba = 26, .max_hw_frame_size = 9022, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; const struct e1000_info e1000_pch_spt_info = { .mac = e1000_pch_spt, .flags = FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_HAS_HW_TIMESTAMP | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH | FLAG_HAS_JUMBO_FRAMES | FLAG_APME_IN_WUC, .flags2 = FLAG2_HAS_PHY_STATS | FLAG2_HAS_EEE, .pba = 26, .max_hw_frame_size = 9022, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, .nvm_ops = &spt_nvm_ops, }; const struct e1000_info e1000_pch_cnp_info = { .mac = e1000_pch_cnp, .flags = FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_HAS_HW_TIMESTAMP | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH | FLAG_HAS_JUMBO_FRAMES | FLAG_APME_IN_WUC, .flags2 = FLAG2_HAS_PHY_STATS | FLAG2_HAS_EEE, .pba = 26, .max_hw_frame_size = 9022, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, .nvm_ops = &spt_nvm_ops, }; const struct e1000_info e1000_pch_tgp_info = { .mac = e1000_pch_tgp, .flags = FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_HAS_HW_TIMESTAMP | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH | FLAG_HAS_JUMBO_FRAMES | FLAG_APME_IN_WUC, .flags2 = FLAG2_HAS_PHY_STATS | FLAG2_HAS_EEE, .pba = 26, .max_hw_frame_size = 9022, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, .nvm_ops = &spt_nvm_ops, }; const struct e1000_info e1000_pch_adp_info = { .mac = e1000_pch_adp, .flags = FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_HAS_HW_TIMESTAMP | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH | FLAG_HAS_JUMBO_FRAMES | FLAG_APME_IN_WUC, .flags2 = FLAG2_HAS_PHY_STATS | FLAG2_HAS_EEE, .pba = 26, .max_hw_frame_size = 9022, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, .nvm_ops = &spt_nvm_ops, }; const struct e1000_info e1000_pch_mtp_info = { .mac = e1000_pch_mtp, .flags = FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_HAS_HW_TIMESTAMP | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT | FLAG_HAS_FLASH | FLAG_HAS_JUMBO_FRAMES | FLAG_APME_IN_WUC, .flags2 = FLAG2_HAS_PHY_STATS | FLAG2_HAS_EEE, .pba = 26, .max_hw_frame_size = 9022, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, .phy_ops = &ich8_phy_ops, .nvm_ops = &spt_nvm_ops, };
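/*
 * Illustrative sketch, not part of the upstream driver: how the NVM ops wired
 * up in the tables above are meant to be exercised.  The helper name below is
 * hypothetical; it stages one word through nvm.ops.write() (which only updates
 * the shadow RAM copy) and then commits it to flash with nvm.ops.update().
 */
static s32 __maybe_unused e1000e_nvm_commit_sketch(struct e1000_hw *hw,
						   u16 offset, u16 word)
{
	s32 ret_val;

	/* Stage the word in the driver's shadow RAM copy of the NVM. */
	ret_val = hw->nvm.ops.write(hw, offset, 1, &word);
	if (ret_val)
		return ret_val;

	/* Commit the pending shadow RAM changes to the inactive flash bank. */
	return hw->nvm.ops.update(hw);
}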
linux-master
drivers/net/ethernet/intel/e1000e/ich8lan.c
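Editor's note: the ich8lan.c excerpt above ends with the e1000_info tables that tie each MAC generation (ICH8/9/10, PCH, LPT, SPT, CNP, TGP, ADP, MTP) to its flag set and to its mac/phy/nvm operations tables; note how the SPT-and-later parts keep ich8_mac_ops and ich8_phy_ops but swap ich8_nvm_ops for spt_nvm_ops. The following is a minimal user-space sketch, not driver code, of how such an info/ops table is typically consumed: probe picks one entry, and all subsequent hardware access goes through its function pointers. Every name in it (demo_hw, demo_nvm_ops, demo_boards, the flag values) is hypothetical.

/* Minimal sketch of info/ops-table dispatch, assuming simplified
 * demo_* types; this is an illustration, not the kernel interfaces.
 */
#include <stdio.h>

struct demo_hw;                                    /* opaque device context */

struct demo_nvm_ops {
	int  (*acquire)(struct demo_hw *hw);       /* take NVM ownership    */
	int  (*read)(struct demo_hw *hw, int off); /* read one word         */
	void (*release)(struct demo_hw *hw);       /* drop NVM ownership    */
};

struct demo_info {
	const char *name;
	unsigned int flags;                        /* capability bits       */
	const struct demo_nvm_ops *nvm_ops;        /* family-specific ops   */
};

static int ich8_style_acquire(struct demo_hw *hw) { (void)hw; return 0; }
static int ich8_style_read(struct demo_hw *hw, int off) { (void)hw; return off; }
static void ich8_style_release(struct demo_hw *hw) { (void)hw; }

static const struct demo_nvm_ops demo_ich8_nvm_ops = {
	.acquire = ich8_style_acquire,
	.read    = ich8_style_read,
	.release = ich8_style_release,
};

static const struct demo_info demo_boards[] = {
	{ .name = "ich8", .flags = 0x1, .nvm_ops = &demo_ich8_nvm_ops },
	{ .name = "spt",  .flags = 0x3, .nvm_ops = &demo_ich8_nvm_ops },
};

int main(void)
{
	const struct demo_info *info = &demo_boards[0];  /* "probe" result */
	struct demo_hw *hw = NULL;                       /* unused by stubs */

	if (info->nvm_ops->acquire(hw) == 0) {
		printf("%s: word 0 = %d\n", info->name,
		       info->nvm_ops->read(hw, 0));
		info->nvm_ops->release(hw);
	}
	return 0;
}

The point of the indirection is that a family file such as ich8lan.c only has to fill in the tables; common code can stay ignorant of which acquire/read/release routine a given part needs.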
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "e1000.h" /** * e1000e_get_bus_info_pcie - Get PCIe bus information * @hw: pointer to the HW structure * * Determines and stores the system bus information for a particular * network interface. The following bus information is determined and stored: * bus speed, bus width, type (PCIe), and PCIe function. **/ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; struct e1000_bus_info *bus = &hw->bus; struct e1000_adapter *adapter = hw->adapter; u16 pcie_link_status, cap_offset; cap_offset = adapter->pdev->pcie_cap; if (!cap_offset) { bus->width = e1000_bus_width_unknown; } else { pci_read_config_word(adapter->pdev, cap_offset + PCIE_LINK_STATUS, &pcie_link_status); bus->width = (enum e1000_bus_width)((pcie_link_status & PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT); } mac->ops.set_lan_id(hw); return 0; } /** * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices * * @hw: pointer to the HW structure * * Determines the LAN function id by reading memory-mapped registers * and swaps the port value if requested. **/ void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) { struct e1000_bus_info *bus = &hw->bus; u32 reg; /* The status register reports the correct function number * for the device regardless of function swap state. */ reg = er32(STATUS); bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; } /** * e1000_set_lan_id_single_port - Set LAN id for a single port device * @hw: pointer to the HW structure * * Sets the LAN function id to zero for a single port device. **/ void e1000_set_lan_id_single_port(struct e1000_hw *hw) { struct e1000_bus_info *bus = &hw->bus; bus->func = 0; } /** * e1000_clear_vfta_generic - Clear VLAN filter table * @hw: pointer to the HW structure * * Clears the register array which contains the VLAN filter table by * setting all the values to 0. **/ void e1000_clear_vfta_generic(struct e1000_hw *hw) { u32 offset; for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); e1e_flush(); } } /** * e1000_write_vfta_generic - Write value to VLAN filter table * @hw: pointer to the HW structure * @offset: register offset in VLAN filter table * @value: register value written to VLAN filter table * * Writes value at the given offset in the register array which stores * the VLAN filter table. **/ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) { E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); e1e_flush(); } /** * e1000e_init_rx_addrs - Initialize receive address's * @hw: pointer to the HW structure * @rar_count: receive address registers * * Setup the receive address registers by setting the base receive address * register to the devices MAC address and clearing all the other receive * address registers to 0. **/ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) { u32 i; u8 mac_addr[ETH_ALEN] = { 0 }; /* Setup the receive address */ e_dbg("Programming MAC Address into RAR[0]\n"); hw->mac.ops.rar_set(hw, hw->mac.addr, 0); /* Zero out the other (rar_entry_count - 1) receive addresses */ e_dbg("Clearing RAR[1-%u]\n", rar_count - 1); for (i = 1; i < rar_count; i++) hw->mac.ops.rar_set(hw, mac_addr, i); } /** * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr * @hw: pointer to the HW structure * * Checks the nvm for an alternate MAC address. 
An alternate MAC address * can be setup by pre-boot software and must be treated like a permanent * address and must override the actual permanent MAC address. If an * alternate MAC address is found it is programmed into RAR0, replacing * the permanent address that was installed into RAR0 by the Si on reset. * This function will return SUCCESS unless it encounters an error while * reading the EEPROM. **/ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) { u32 i; s32 ret_val; u16 offset, nvm_alt_mac_addr_offset, nvm_data; u8 alt_mac_addr[ETH_ALEN]; ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data); if (ret_val) return ret_val; /* not supported on 82573 */ if (hw->mac.type == e1000_82573) return 0; ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, &nvm_alt_mac_addr_offset); if (ret_val) { e_dbg("NVM Read Error\n"); return ret_val; } if ((nvm_alt_mac_addr_offset == 0xFFFF) || (nvm_alt_mac_addr_offset == 0x0000)) /* There is no Alternate MAC Address */ return 0; if (hw->bus.func == E1000_FUNC_1) nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; for (i = 0; i < ETH_ALEN; i += 2) { offset = nvm_alt_mac_addr_offset + (i >> 1); ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); if (ret_val) { e_dbg("NVM Read Error\n"); return ret_val; } alt_mac_addr[i] = (u8)(nvm_data & 0xFF); alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); } /* if multicast bit is set, the alternate address will not be used */ if (is_multicast_ether_addr(alt_mac_addr)) { e_dbg("Ignoring Alternate Mac Address with MC bit set\n"); return 0; } /* We have a valid alternate MAC address, and we want to treat it the * same as the normal permanent MAC address stored by the HW into the * RAR. Do this by mapping this address into RAR0. */ hw->mac.ops.rar_set(hw, alt_mac_addr, 0); return 0; } u32 e1000e_rar_get_count_generic(struct e1000_hw *hw) { return hw->mac.rar_entry_count; } /** * e1000e_rar_set_generic - Set receive address register * @hw: pointer to the HW structure * @addr: pointer to the receive address * @index: receive address array register * * Sets the receive address array register at index to the address passed * in by addr. **/ int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; /* HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); /* If MAC address zero, no need to set the AV bit */ if (rar_low || rar_high) rar_high |= E1000_RAH_AV; /* Some bridges will combine consecutive 32-bit writes into * a single burst write, which will malfunction on some parts. * The flushes avoid this. */ ew32(RAL(index), rar_low); e1e_flush(); ew32(RAH(index), rar_high); e1e_flush(); return 0; } /** * e1000_hash_mc_addr - Generate a multicast hash value * @hw: pointer to the HW structure * @mc_addr: pointer to a multicast address * * Generates a multicast address hash value which is used to determine * the multicast filter table array address and new table value. **/ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) { u32 hash_value, hash_mask; u8 bit_shift = 0; /* Register count multiplied by bits per register */ hash_mask = (hw->mac.mta_reg_count * 32) - 1; /* For a mc_filter_type of 0, bit_shift is the number of left-shifts * where 0xFF would still fall within the hash mask. 
*/ while (hash_mask >> bit_shift != 0xFF) bit_shift++; /* The portion of the address that is used for the hash table * is determined by the mc_filter_type setting. * The algorithm is such that there is a total of 8 bits of shifting. * The bit_shift for a mc_filter_type of 0 represents the number of * left-shifts where the MSB of mc_addr[5] would still fall within * the hash_mask. Case 0 does this exactly. Since there are a total * of 8 bits of shifting, then mc_addr[4] will shift right the * remaining number of bits. Thus 8 - bit_shift. The rest of the * cases are a variation of this algorithm...essentially raising the * number of bits to shift mc_addr[5] left, while still keeping the * 8-bit shifting total. * * For example, given the following Destination MAC Address and an * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), * we can see that the bit_shift for case 0 is 4. These are the hash * values resulting from each mc_filter_type... * [0] [1] [2] [3] [4] [5] * 01 AA 00 12 34 56 * LSB MSB * * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 */ switch (hw->mac.mc_filter_type) { default: case 0: break; case 1: bit_shift += 1; break; case 2: bit_shift += 2; break; case 3: bit_shift += 4; break; } hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | (((u16)mc_addr[5]) << bit_shift))); return hash_value; } /** * e1000e_update_mc_addr_list_generic - Update Multicast addresses * @hw: pointer to the HW structure * @mc_addr_list: array of multicast addresses to program * @mc_addr_count: number of multicast addresses to program * * Updates entire Multicast Table Array. * The caller must have a packed mc_addr_list of multicast addresses. **/ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, u8 *mc_addr_list, u32 mc_addr_count) { u32 hash_value, hash_bit, hash_reg; int i; /* clear mta_shadow */ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); /* update mta_shadow from mc_addr_list */ for (i = 0; (u32)i < mc_addr_count; i++) { hash_value = e1000_hash_mc_addr(hw, mc_addr_list); hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); hash_bit = hash_value & 0x1F; hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); mc_addr_list += (ETH_ALEN); } /* replace the entire MTA table */ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); e1e_flush(); } /** * e1000e_clear_hw_cntrs_base - Clear base hardware counters * @hw: pointer to the HW structure * * Clears the base hardware counters by reading the counter registers. **/ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) { er32(CRCERRS); er32(SYMERRS); er32(MPC); er32(SCC); er32(ECOL); er32(MCC); er32(LATECOL); er32(COLC); er32(DC); er32(SEC); er32(RLEC); er32(XONRXC); er32(XONTXC); er32(XOFFRXC); er32(XOFFTXC); er32(FCRUC); er32(GPRC); er32(BPRC); er32(MPRC); er32(GPTC); er32(GORCL); er32(GORCH); er32(GOTCL); er32(GOTCH); er32(RNBC); er32(RUC); er32(RFC); er32(ROC); er32(RJC); er32(TORL); er32(TORH); er32(TOTL); er32(TOTH); er32(TPR); er32(TPT); er32(MPTC); er32(BPTC); } /** * e1000e_check_for_copper_link - Check for link (Copper) * @hw: pointer to the HW structure * * Checks to see of the link status of the hardware has changed. 
If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. **/ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val; bool link; /* We only want to go out to the PHY registers to see if Auto-Neg * has completed and/or if our link status has changed. The * get_link_status flag is set upon receiving a Link Status * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) return 0; mac->get_link_status = false; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex * of the PHY. */ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (ret_val || !link) goto out; /* Check if there was DownShift, must be checked * immediately after link-up */ e1000e_check_downshift(hw); /* If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. */ if (!mac->autoneg) return -E1000_ERR_CONFIG; /* Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to * configure Collision Distance in the MAC. */ mac->ops.config_collision_dist(hw); /* Configure Flow Control now that Auto-Neg has completed. * First, we need to restore the desired flow control * settings because we may have had to re-autoneg with a * different link partner. */ ret_val = e1000e_config_fc_after_link_up(hw); if (ret_val) e_dbg("Error configuring flow control\n"); return ret_val; out: mac->get_link_status = true; return ret_val; } /** * e1000e_check_for_fiber_link - Check for link (Fiber) * @hw: pointer to the HW structure * * Checks for link up on the hardware. If link is not up and we have * a signal, then we need to force link up. **/ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 rxcw; u32 ctrl; u32 status; s32 ret_val; ctrl = er32(CTRL); status = er32(STATUS); rxcw = er32(RXCW); /* If we don't have link (auto-negotiation failed or link partner * cannot auto-negotiate), the cable is plugged in (we have signal), * and our link partner is not trying to auto-negotiate with us (we * are receiving idles or data), we need to force link up. We also * need to give auto-negotiation time to complete, in case the cable * was just plugged in. The autoneg_failed flag does this. */ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { if (!mac->autoneg_failed) { mac->autoneg_failed = true; return 0; } e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); /* Disable auto-negotiation in the TXCW register */ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); /* Force link-up and also force full-duplex. */ ctrl = er32(CTRL); ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); ew32(CTRL, ctrl); /* Configure Flow Control after forcing link up. */ ret_val = e1000e_config_fc_after_link_up(hw); if (ret_val) { e_dbg("Error configuring flow control\n"); return ret_val; } } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { /* If we are forcing link and we are receiving /C/ ordered * sets, re-enable auto-negotiation in the TXCW register * and disable forced link in the Device Control register * in an attempt to auto-negotiate with our link partner. 
*/ e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); mac->serdes_has_link = true; } return 0; } /** * e1000e_check_for_serdes_link - Check for link (Serdes) * @hw: pointer to the HW structure * * Checks for link up on the hardware. If link is not up and we have * a signal, then we need to force link up. **/ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 rxcw; u32 ctrl; u32 status; s32 ret_val; ctrl = er32(CTRL); status = er32(STATUS); rxcw = er32(RXCW); /* If we don't have link (auto-negotiation failed or link partner * cannot auto-negotiate), and our link partner is not trying to * auto-negotiate with us (we are receiving idles or data), * we need to force link up. We also need to give auto-negotiation * time to complete. */ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { if (!mac->autoneg_failed) { mac->autoneg_failed = true; return 0; } e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); /* Disable auto-negotiation in the TXCW register */ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); /* Force link-up and also force full-duplex. */ ctrl = er32(CTRL); ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); ew32(CTRL, ctrl); /* Configure Flow Control after forcing link up. */ ret_val = e1000e_config_fc_after_link_up(hw); if (ret_val) { e_dbg("Error configuring flow control\n"); return ret_val; } } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { /* If we are forcing link and we are receiving /C/ ordered * sets, re-enable auto-negotiation in the TXCW register * and disable forced link in the Device Control register * in an attempt to auto-negotiate with our link partner. */ e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); mac->serdes_has_link = true; } else if (!(E1000_TXCW_ANE & er32(TXCW))) { /* If we force link for non-auto-negotiation switch, check * link status based on MAC synchronization for internal * serdes media type. */ /* SYNCH bit and IV bit are sticky. */ usleep_range(10, 20); rxcw = er32(RXCW); if (rxcw & E1000_RXCW_SYNCH) { if (!(rxcw & E1000_RXCW_IV)) { mac->serdes_has_link = true; e_dbg("SERDES: Link up - forced.\n"); } } else { mac->serdes_has_link = false; e_dbg("SERDES: Link down - force failed.\n"); } } if (E1000_TXCW_ANE & er32(TXCW)) { status = er32(STATUS); if (status & E1000_STATUS_LU) { /* SYNCH bit and IV bit are sticky, so reread rxcw. */ usleep_range(10, 20); rxcw = er32(RXCW); if (rxcw & E1000_RXCW_SYNCH) { if (!(rxcw & E1000_RXCW_IV)) { mac->serdes_has_link = true; e_dbg("SERDES: Link up - autoneg completed successfully.\n"); } else { mac->serdes_has_link = false; e_dbg("SERDES: Link down - invalid codewords detected in autoneg.\n"); } } else { mac->serdes_has_link = false; e_dbg("SERDES: Link down - no sync.\n"); } } else { mac->serdes_has_link = false; e_dbg("SERDES: Link down - autoneg failed\n"); } } return 0; } /** * e1000_set_default_fc_generic - Set flow control default values * @hw: pointer to the HW structure * * Read the EEPROM for the default values for flow control and store the * values. **/ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) { s32 ret_val; u16 nvm_data; /* Read and store word 0x0F of the EEPROM. 
This word contains bits * that determine the hardware's default PAUSE (flow control) mode, * a bit that determines whether the HW defaults to enabling or * disabling auto-negotiation, and the direction of the * SW defined pins. If there is no SW over-ride of the flow * control setting, then the variable hw->fc will * be initialized based on a value in the EEPROM. */ ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); if (ret_val) { e_dbg("NVM Read Error\n"); return ret_val; } if (!(nvm_data & NVM_WORD0F_PAUSE_MASK)) hw->fc.requested_mode = e1000_fc_none; else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) hw->fc.requested_mode = e1000_fc_tx_pause; else hw->fc.requested_mode = e1000_fc_full; return 0; } /** * e1000e_setup_link_generic - Setup flow control and link settings * @hw: pointer to the HW structure * * Determines which flow control settings to use, then configures flow * control. Calls the appropriate media-specific link configuration * function. Assuming the adapter has a valid link partner, a valid link * should be established. Assumes the hardware has previously been reset * and the transmitter and receiver are not enabled. **/ s32 e1000e_setup_link_generic(struct e1000_hw *hw) { s32 ret_val; /* In the case of the phy reset being blocked, we already have a link. * We do not need to set it up again. */ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) return 0; /* If requested flow control is set to default, set flow control * based on the EEPROM flow control settings. */ if (hw->fc.requested_mode == e1000_fc_default) { ret_val = e1000_set_default_fc_generic(hw); if (ret_val) return ret_val; } /* Save off the requested flow control mode for use later. Depending * on the link partner's capabilities, we may or may not use this mode. */ hw->fc.current_mode = hw->fc.requested_mode; e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); /* Call the necessary media_type subroutine to configure the link. */ ret_val = hw->mac.ops.setup_physical_interface(hw); if (ret_val) return ret_val; /* Initialize the flow control address, type, and PAUSE timer * registers to their default values. This is done even if flow * control is disabled, because it does not hurt anything to * initialize these registers. */ e_dbg("Initializing the Flow Control address, type and timer regs\n"); ew32(FCT, FLOW_CONTROL_TYPE); ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); ew32(FCTTV, hw->fc.pause_time); return e1000e_set_fc_watermarks(hw); } /** * e1000_commit_fc_settings_generic - Configure flow control * @hw: pointer to the HW structure * * Write the flow control settings to the Transmit Config Word Register (TXCW) * base on the flow control settings in e1000_mac_info. **/ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 txcw; /* Check for a software override of the flow control settings, and * setup the device accordingly. If auto-negotiation is enabled, then * software will have to set the "PAUSE" bits to the correct value in * the Transmit Config Word Register (TXCW) and re-start auto- * negotiation. However, if auto-negotiation is disabled, then * software will have to manually configure the two flow control enable * bits in the CTRL register. * * The possible values of the "fc" parameter are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames, * but not send pause frames). 
* 2: Tx flow control is enabled (we can send pause frames but we * do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. */ switch (hw->fc.current_mode) { case e1000_fc_none: /* Flow control completely disabled by a software over-ride. */ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); break; case e1000_fc_rx_pause: /* Rx Flow control is enabled and Tx Flow control is disabled * by a software over-ride. Since there really isn't a way to * advertise that we are capable of Rx Pause ONLY, we will * advertise that we support both symmetric and asymmetric Rx * PAUSE. Later, we will disable the adapter's ability to send * PAUSE frames. */ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); break; case e1000_fc_tx_pause: /* Tx Flow control is enabled, and Rx Flow control is disabled, * by a software over-ride. */ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); break; case e1000_fc_full: /* Flow control (both Rx and Tx) is enabled by a software * over-ride. */ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); break; default: e_dbg("Flow control param set incorrectly\n"); return -E1000_ERR_CONFIG; } ew32(TXCW, txcw); mac->txcw = txcw; return 0; } /** * e1000_poll_fiber_serdes_link_generic - Poll for link up * @hw: pointer to the HW structure * * Polls for link up by reading the status register, if link fails to come * up with auto-negotiation, then the link is forced if a signal is detected. **/ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 i, status; s32 ret_val; /* If we have a signal (the cable is plugged in, or assumed true for * serdes media) then poll for a "Link-Up" indication in the Device * Status Register. Time-out if a link isn't seen in 500 milliseconds * seconds (Auto-negotiation should complete in less than 500 * milliseconds even if the other end is doing it in SW). */ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { usleep_range(10000, 11000); status = er32(STATUS); if (status & E1000_STATUS_LU) break; } if (i == FIBER_LINK_UP_LIMIT) { e_dbg("Never got a valid link from auto-neg!!!\n"); mac->autoneg_failed = true; /* AutoNeg failed to achieve a link, so we'll call * mac->check_for_link. This routine will force the * link up if we detect a signal. This will allow us to * communicate with non-autonegotiating link partners. */ ret_val = mac->ops.check_for_link(hw); if (ret_val) { e_dbg("Error while checking for link\n"); return ret_val; } mac->autoneg_failed = false; } else { mac->autoneg_failed = false; e_dbg("Valid Link Found\n"); } return 0; } /** * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes * @hw: pointer to the HW structure * * Configures collision distance and flow control for fiber and serdes * links. Upon successful setup, poll for link. **/ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; ctrl = er32(CTRL); /* Take the link out of reset */ ctrl &= ~E1000_CTRL_LRST; hw->mac.ops.config_collision_dist(hw); ret_val = e1000_commit_fc_settings_generic(hw); if (ret_val) return ret_val; /* Since auto-negotiation is enabled, take the link out of reset (the * link will be in reset, because we previously reset the chip). This * will restart auto-negotiation. If auto-negotiation is successful * then the link-up status bit will be set and the flow control enable * bits (RFCE and TFCE) will be set according to their negotiated value. 
*/ e_dbg("Auto-negotiation enabled\n"); ew32(CTRL, ctrl); e1e_flush(); usleep_range(1000, 2000); /* For these adapters, the SW definable pin 1 is set when the optics * detect a signal. If we have a signal, then poll for a "Link-Up" * indication. */ if (hw->phy.media_type == e1000_media_type_internal_serdes || (er32(CTRL) & E1000_CTRL_SWDPIN1)) { ret_val = e1000_poll_fiber_serdes_link_generic(hw); } else { e_dbg("No signal detected\n"); } return ret_val; } /** * e1000e_config_collision_dist_generic - Configure collision distance * @hw: pointer to the HW structure * * Configures the collision distance to the default value and is used * during link setup. **/ void e1000e_config_collision_dist_generic(struct e1000_hw *hw) { u32 tctl; tctl = er32(TCTL); tctl &= ~E1000_TCTL_COLD; tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; ew32(TCTL, tctl); e1e_flush(); } /** * e1000e_set_fc_watermarks - Set flow control high/low watermarks * @hw: pointer to the HW structure * * Sets the flow control high/low threshold (watermark) registers. If * flow control XON frame transmission is enabled, then set XON frame * transmission as well. **/ s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) { u32 fcrtl = 0, fcrth = 0; /* Set the flow control receive threshold registers. Normally, * these registers will be set to a default threshold that may be * adjusted later by the driver's runtime code. However, if the * ability to transmit pause frames is not enabled, then these * registers will be set to 0. */ if (hw->fc.current_mode & e1000_fc_tx_pause) { /* We need to set up the Receive Threshold high and low water * marks as well as (optionally) enabling the transmission of * XON frames. */ fcrtl = hw->fc.low_water; if (hw->fc.send_xon) fcrtl |= E1000_FCRTL_XONE; fcrth = hw->fc.high_water; } ew32(FCRTL, fcrtl); ew32(FCRTH, fcrth); return 0; } /** * e1000e_force_mac_fc - Force the MAC's flow control settings * @hw: pointer to the HW structure * * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the * device control register to reflect the adapter settings. TFCE and RFCE * need to be explicitly set by software when a copper PHY is used because * autonegotiation is managed by the PHY rather than the MAC. Software must * also configure these bits when link is forced on a fiber connection. **/ s32 e1000e_force_mac_fc(struct e1000_hw *hw) { u32 ctrl; ctrl = er32(CTRL); /* Because we didn't get link via the internal auto-negotiation * mechanism (we either forced link or we got link via PHY * auto-neg), we have to manually enable/disable transmit an * receive flow control. * * The "Case" statement below enables/disable flow control * according to the "hw->fc.current_mode" parameter. * * The possible values of the "fc" parameter are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause * frames but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames * but we do not receive pause frames). * 3: Both Rx and Tx flow control (symmetric) is enabled. * other: No other values should be possible at this point. 
*/ e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); switch (hw->fc.current_mode) { case e1000_fc_none: ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); break; case e1000_fc_rx_pause: ctrl &= (~E1000_CTRL_TFCE); ctrl |= E1000_CTRL_RFCE; break; case e1000_fc_tx_pause: ctrl &= (~E1000_CTRL_RFCE); ctrl |= E1000_CTRL_TFCE; break; case e1000_fc_full: ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); break; default: e_dbg("Flow control param set incorrectly\n"); return -E1000_ERR_CONFIG; } ew32(CTRL, ctrl); return 0; } /** * e1000e_config_fc_after_link_up - Configures flow control after link * @hw: pointer to the HW structure * * Checks the status of auto-negotiation after link up to ensure that the * speed and duplex were not forced. If the link needed to be forced, then * flow control needs to be forced also. If auto-negotiation is enabled * and did not fail, then we configure flow control based on our link * partner. **/ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val = 0; u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; u16 speed, duplex; /* Check for the case where we have fiber media and auto-neg failed * so we had to force link. In this case, we need to force the * configuration of the MAC to match the "fc" parameter. */ if (mac->autoneg_failed) { if (hw->phy.media_type == e1000_media_type_fiber || hw->phy.media_type == e1000_media_type_internal_serdes) ret_val = e1000e_force_mac_fc(hw); } else { if (hw->phy.media_type == e1000_media_type_copper) ret_val = e1000e_force_mac_fc(hw); } if (ret_val) { e_dbg("Error forcing flow control settings\n"); return ret_val; } /* Check for the case where we have copper media and auto-neg is * enabled. In this case, we need to check and see if Auto-Neg * has completed, and if so, how the PHY and link partner has * flow control configured. */ if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { /* Read the MII Status Register and check to see if AutoNeg * has completed. We read this twice because this reg has * some "sticky" (latched) bits. */ ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg); if (ret_val) return ret_val; if (!(mii_status_reg & BMSR_ANEGCOMPLETE)) { e_dbg("Copper PHY and Auto Neg has not completed.\n"); return ret_val; } /* The AutoNeg process has completed, so we now need to * read both the Auto Negotiation Advertisement * Register (Address 4) and the Auto_Negotiation Base * Page Ability Register (Address 5) to determine how * flow control was negotiated. */ ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_nway_adv_reg); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, MII_LPA, &mii_nway_lp_ability_reg); if (ret_val) return ret_val; /* Two bits in the Auto Negotiation Advertisement Register * (Address 4) and two bits in the Auto Negotiation Base * Page Ability Register (Address 5) determine flow control * for both the PHY and the link partner. The following * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, * 1999, describes these PAUSE resolution bits and how flow * control is determined based upon these settings. 
* NOTE: DC = Don't Care * * LOCAL DEVICE | LINK PARTNER * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution *-------|---------|-------|---------|-------------------- * 0 | 0 | DC | DC | e1000_fc_none * 0 | 1 | 0 | DC | e1000_fc_none * 0 | 1 | 1 | 0 | e1000_fc_none * 0 | 1 | 1 | 1 | e1000_fc_tx_pause * 1 | 0 | 0 | DC | e1000_fc_none * 1 | DC | 1 | DC | e1000_fc_full * 1 | 1 | 0 | 0 | e1000_fc_none * 1 | 1 | 0 | 1 | e1000_fc_rx_pause * * Are both PAUSE bits set to 1? If so, this implies * Symmetric Flow Control is enabled at both ends. The * ASM_DIR bits are irrelevant per the spec. * * For Symmetric Flow Control: * * LOCAL DEVICE | LINK PARTNER * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result *-------|---------|-------|---------|-------------------- * 1 | DC | 1 | DC | E1000_fc_full * */ if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) && (mii_nway_lp_ability_reg & LPA_PAUSE_CAP)) { /* Now we need to check if the user selected Rx ONLY * of pause frames. In this case, we had to advertise * FULL flow control because we could not advertise Rx * ONLY. Hence, we must now check to see if we need to * turn OFF the TRANSMISSION of PAUSE frames. */ if (hw->fc.requested_mode == e1000_fc_full) { hw->fc.current_mode = e1000_fc_full; e_dbg("Flow Control = FULL.\n"); } else { hw->fc.current_mode = e1000_fc_rx_pause; e_dbg("Flow Control = Rx PAUSE frames only.\n"); } } /* For receiving PAUSE frames ONLY. * * LOCAL DEVICE | LINK PARTNER * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result *-------|---------|-------|---------|-------------------- * 0 | 1 | 1 | 1 | e1000_fc_tx_pause */ else if (!(mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) && (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) && (mii_nway_lp_ability_reg & LPA_PAUSE_CAP) && (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) { hw->fc.current_mode = e1000_fc_tx_pause; e_dbg("Flow Control = Tx PAUSE frames only.\n"); } /* For transmitting PAUSE frames ONLY. * * LOCAL DEVICE | LINK PARTNER * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result *-------|---------|-------|---------|-------------------- * 1 | 1 | 0 | 1 | e1000_fc_rx_pause */ else if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) && (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) && !(mii_nway_lp_ability_reg & LPA_PAUSE_CAP) && (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) { hw->fc.current_mode = e1000_fc_rx_pause; e_dbg("Flow Control = Rx PAUSE frames only.\n"); } else { /* Per the IEEE spec, at this point flow control * should be disabled. */ hw->fc.current_mode = e1000_fc_none; e_dbg("Flow Control = NONE.\n"); } /* Now we need to do one last check... If we auto- * negotiated to HALF DUPLEX, flow control should not be * enabled per IEEE 802.3 spec. */ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); if (ret_val) { e_dbg("Error getting link speed and duplex\n"); return ret_val; } if (duplex == HALF_DUPLEX) hw->fc.current_mode = e1000_fc_none; /* Now we call a subroutine to actually force the MAC * controller to use the correct flow control settings. */ ret_val = e1000e_force_mac_fc(hw); if (ret_val) { e_dbg("Error forcing flow control settings\n"); return ret_val; } } /* Check for the case where we have SerDes media and auto-neg is * enabled. In this case, we need to check and see if Auto-Neg * has completed, and if so, how the PHY and link partner has * flow control configured. */ if ((hw->phy.media_type == e1000_media_type_internal_serdes) && mac->autoneg) { /* Read the PCS_LSTS and check to see if AutoNeg * has completed. 
*/ pcs_status_reg = er32(PCS_LSTAT); if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { e_dbg("PCS Auto Neg has not completed.\n"); return ret_val; } /* The AutoNeg process has completed, so we now need to * read both the Auto Negotiation Advertisement * Register (PCS_ANADV) and the Auto_Negotiation Base * Page Ability Register (PCS_LPAB) to determine how * flow control was negotiated. */ pcs_adv_reg = er32(PCS_ANADV); pcs_lp_ability_reg = er32(PCS_LPAB); /* Two bits in the Auto Negotiation Advertisement Register * (PCS_ANADV) and two bits in the Auto Negotiation Base * Page Ability Register (PCS_LPAB) determine flow control * for both the PHY and the link partner. The following * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, * 1999, describes these PAUSE resolution bits and how flow * control is determined based upon these settings. * NOTE: DC = Don't Care * * LOCAL DEVICE | LINK PARTNER * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution *-------|---------|-------|---------|-------------------- * 0 | 0 | DC | DC | e1000_fc_none * 0 | 1 | 0 | DC | e1000_fc_none * 0 | 1 | 1 | 0 | e1000_fc_none * 0 | 1 | 1 | 1 | e1000_fc_tx_pause * 1 | 0 | 0 | DC | e1000_fc_none * 1 | DC | 1 | DC | e1000_fc_full * 1 | 1 | 0 | 0 | e1000_fc_none * 1 | 1 | 0 | 1 | e1000_fc_rx_pause * * Are both PAUSE bits set to 1? If so, this implies * Symmetric Flow Control is enabled at both ends. The * ASM_DIR bits are irrelevant per the spec. * * For Symmetric Flow Control: * * LOCAL DEVICE | LINK PARTNER * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result *-------|---------|-------|---------|-------------------- * 1 | DC | 1 | DC | e1000_fc_full * */ if ((pcs_adv_reg & E1000_TXCW_PAUSE) && (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { /* Now we need to check if the user selected Rx ONLY * of pause frames. In this case, we had to advertise * FULL flow control because we could not advertise Rx * ONLY. Hence, we must now check to see if we need to * turn OFF the TRANSMISSION of PAUSE frames. */ if (hw->fc.requested_mode == e1000_fc_full) { hw->fc.current_mode = e1000_fc_full; e_dbg("Flow Control = FULL.\n"); } else { hw->fc.current_mode = e1000_fc_rx_pause; e_dbg("Flow Control = Rx PAUSE frames only.\n"); } } /* For receiving PAUSE frames ONLY. * * LOCAL DEVICE | LINK PARTNER * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result *-------|---------|-------|---------|-------------------- * 0 | 1 | 1 | 1 | e1000_fc_tx_pause */ else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && (pcs_adv_reg & E1000_TXCW_ASM_DIR) && (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { hw->fc.current_mode = e1000_fc_tx_pause; e_dbg("Flow Control = Tx PAUSE frames only.\n"); } /* For transmitting PAUSE frames ONLY. * * LOCAL DEVICE | LINK PARTNER * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result *-------|---------|-------|---------|-------------------- * 1 | 1 | 0 | 1 | e1000_fc_rx_pause */ else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && (pcs_adv_reg & E1000_TXCW_ASM_DIR) && !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { hw->fc.current_mode = e1000_fc_rx_pause; e_dbg("Flow Control = Rx PAUSE frames only.\n"); } else { /* Per the IEEE spec, at this point flow control * should be disabled. */ hw->fc.current_mode = e1000_fc_none; e_dbg("Flow Control = NONE.\n"); } /* Now we call a subroutine to actually force the MAC * controller to use the correct flow control settings. 
*/ pcs_ctrl_reg = er32(PCS_LCTL); pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; ew32(PCS_LCTL, pcs_ctrl_reg); ret_val = e1000e_force_mac_fc(hw); if (ret_val) { e_dbg("Error forcing flow control settings\n"); return ret_val; } } return 0; } /** * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex * @hw: pointer to the HW structure * @speed: stores the current speed * @duplex: stores the current duplex * * Read the status register for the current speed/duplex and store the current * speed and duplex for copper connections. **/ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex) { u32 status; status = er32(STATUS); if (status & E1000_STATUS_SPEED_1000) *speed = SPEED_1000; else if (status & E1000_STATUS_SPEED_100) *speed = SPEED_100; else *speed = SPEED_10; if (status & E1000_STATUS_FD) *duplex = FULL_DUPLEX; else *duplex = HALF_DUPLEX; e_dbg("%u Mbps, %s Duplex\n", *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10, *duplex == FULL_DUPLEX ? "Full" : "Half"); return 0; } /** * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex * @hw: pointer to the HW structure * @speed: stores the current speed * @duplex: stores the current duplex * * Sets the speed and duplex to gigabit full duplex (the only possible option) * for fiber/serdes links. **/ s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw __always_unused *hw, u16 *speed, u16 *duplex) { *speed = SPEED_1000; *duplex = FULL_DUPLEX; return 0; } /** * e1000e_get_hw_semaphore - Acquire hardware semaphore * @hw: pointer to the HW structure * * Acquire the HW semaphore to access the PHY or NVM **/ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) { u32 swsm; s32 timeout = hw->nvm.word_size + 1; s32 i = 0; /* Get the SW semaphore */ while (i < timeout) { swsm = er32(SWSM); if (!(swsm & E1000_SWSM_SMBI)) break; udelay(100); i++; } if (i == timeout) { e_dbg("Driver can't access device - SMBI bit is set.\n"); return -E1000_ERR_NVM; } /* Get the FW semaphore. */ for (i = 0; i < timeout; i++) { swsm = er32(SWSM); ew32(SWSM, swsm | E1000_SWSM_SWESMBI); /* Semaphore acquired if bit latched */ if (er32(SWSM) & E1000_SWSM_SWESMBI) break; udelay(100); } if (i == timeout) { /* Release semaphores */ e1000e_put_hw_semaphore(hw); e_dbg("Driver can't access the NVM\n"); return -E1000_ERR_NVM; } return 0; } /** * e1000e_put_hw_semaphore - Release hardware semaphore * @hw: pointer to the HW structure * * Release hardware semaphore used to access the PHY or NVM **/ void e1000e_put_hw_semaphore(struct e1000_hw *hw) { u32 swsm; swsm = er32(SWSM); swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); ew32(SWSM, swsm); } /** * e1000e_get_auto_rd_done - Check for auto read completion * @hw: pointer to the HW structure * * Check EEPROM for Auto Read done bit. **/ s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) { s32 i = 0; while (i < AUTO_READ_DONE_TIMEOUT) { if (er32(EECD) & E1000_EECD_AUTO_RD) break; usleep_range(1000, 2000); i++; } if (i == AUTO_READ_DONE_TIMEOUT) { e_dbg("Auto read by HW from NVM has not completed.\n"); return -E1000_ERR_RESET; } return 0; } /** * e1000e_valid_led_default - Verify a valid default LED config * @hw: pointer to the HW structure * @data: pointer to the NVM (EEPROM) * * Read the EEPROM for the current default LED configuration. If the * LED configuration is not valid, set to a valid LED configuration. 
**/ s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) { s32 ret_val; ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); if (ret_val) { e_dbg("NVM Read Error\n"); return ret_val; } if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) *data = ID_LED_DEFAULT; return 0; } /** * e1000e_id_led_init_generic - * @hw: pointer to the HW structure * **/ s32 e1000e_id_led_init_generic(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val; const u32 ledctl_mask = 0x000000FF; const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; u16 data, i, temp; const u16 led_mask = 0x0F; ret_val = hw->nvm.ops.valid_led_default(hw, &data); if (ret_val) return ret_val; mac->ledctl_default = er32(LEDCTL); mac->ledctl_mode1 = mac->ledctl_default; mac->ledctl_mode2 = mac->ledctl_default; for (i = 0; i < 4; i++) { temp = (data >> (i << 2)) & led_mask; switch (temp) { case ID_LED_ON1_DEF2: case ID_LED_ON1_ON2: case ID_LED_ON1_OFF2: mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); mac->ledctl_mode1 |= ledctl_on << (i << 3); break; case ID_LED_OFF1_DEF2: case ID_LED_OFF1_ON2: case ID_LED_OFF1_OFF2: mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); mac->ledctl_mode1 |= ledctl_off << (i << 3); break; default: /* Do nothing */ break; } switch (temp) { case ID_LED_DEF1_ON2: case ID_LED_ON1_ON2: case ID_LED_OFF1_ON2: mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); mac->ledctl_mode2 |= ledctl_on << (i << 3); break; case ID_LED_DEF1_OFF2: case ID_LED_ON1_OFF2: case ID_LED_OFF1_OFF2: mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); mac->ledctl_mode2 |= ledctl_off << (i << 3); break; default: /* Do nothing */ break; } } return 0; } /** * e1000e_setup_led_generic - Configures SW controllable LED * @hw: pointer to the HW structure * * This prepares the SW controllable LED for use and saves the current state * of the LED so it can be later restored. **/ s32 e1000e_setup_led_generic(struct e1000_hw *hw) { u32 ledctl; if (hw->mac.ops.setup_led != e1000e_setup_led_generic) return -E1000_ERR_CONFIG; if (hw->phy.media_type == e1000_media_type_fiber) { ledctl = er32(LEDCTL); hw->mac.ledctl_default = ledctl; /* Turn off LED0 */ ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED0_MODE_MASK); ledctl |= (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT); ew32(LEDCTL, ledctl); } else if (hw->phy.media_type == e1000_media_type_copper) { ew32(LEDCTL, hw->mac.ledctl_mode1); } return 0; } /** * e1000e_cleanup_led_generic - Set LED config to default operation * @hw: pointer to the HW structure * * Remove the current LED configuration and set the LED configuration * to the default value, saved from the EEPROM. **/ s32 e1000e_cleanup_led_generic(struct e1000_hw *hw) { ew32(LEDCTL, hw->mac.ledctl_default); return 0; } /** * e1000e_blink_led_generic - Blink LED * @hw: pointer to the HW structure * * Blink the LEDs which are set to be on. **/ s32 e1000e_blink_led_generic(struct e1000_hw *hw) { u32 ledctl_blink = 0; u32 i; if (hw->phy.media_type == e1000_media_type_fiber) { /* always blink LED0 for PCI-E fiber */ ledctl_blink = E1000_LEDCTL_LED0_BLINK | (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); } else { /* Set the blink bit for each LED that's "on" (0x0E) * (or "off" if inverted) in ledctl_mode2. The blink * logic in hardware only works when mode is set to "on" * so it must be changed accordingly when the mode is * "off" and inverted. 
*/ ledctl_blink = hw->mac.ledctl_mode2; for (i = 0; i < 32; i += 8) { u32 mode = (hw->mac.ledctl_mode2 >> i) & E1000_LEDCTL_LED0_MODE_MASK; u32 led_default = hw->mac.ledctl_default >> i; if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && (mode == E1000_LEDCTL_MODE_LED_ON)) || ((led_default & E1000_LEDCTL_LED0_IVRT) && (mode == E1000_LEDCTL_MODE_LED_OFF))) { ledctl_blink &= ~(E1000_LEDCTL_LED0_MODE_MASK << i); ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_MODE_LED_ON) << i; } } } ew32(LEDCTL, ledctl_blink); return 0; } /** * e1000e_led_on_generic - Turn LED on * @hw: pointer to the HW structure * * Turn LED on. **/ s32 e1000e_led_on_generic(struct e1000_hw *hw) { u32 ctrl; switch (hw->phy.media_type) { case e1000_media_type_fiber: ctrl = er32(CTRL); ctrl &= ~E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; ew32(CTRL, ctrl); break; case e1000_media_type_copper: ew32(LEDCTL, hw->mac.ledctl_mode2); break; default: break; } return 0; } /** * e1000e_led_off_generic - Turn LED off * @hw: pointer to the HW structure * * Turn LED off. **/ s32 e1000e_led_off_generic(struct e1000_hw *hw) { u32 ctrl; switch (hw->phy.media_type) { case e1000_media_type_fiber: ctrl = er32(CTRL); ctrl |= E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; ew32(CTRL, ctrl); break; case e1000_media_type_copper: ew32(LEDCTL, hw->mac.ledctl_mode1); break; default: break; } return 0; } /** * e1000e_set_pcie_no_snoop - Set PCI-express capabilities * @hw: pointer to the HW structure * @no_snoop: bitmap of snoop events * * Set the PCI-express register to snoop for events enabled in 'no_snoop'. **/ void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop) { u32 gcr; if (no_snoop) { gcr = er32(GCR); gcr &= ~(PCIE_NO_SNOOP_ALL); gcr |= no_snoop; ew32(GCR, gcr); } } /** * e1000e_disable_pcie_master - Disables PCI-express master access * @hw: pointer to the HW structure * * Returns 0 if successful, else returns -10 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused * the master requests to be disabled. * * Disables PCI-Express master access and verifies there are no pending * requests. **/ s32 e1000e_disable_pcie_master(struct e1000_hw *hw) { u32 ctrl; s32 timeout = MASTER_DISABLE_TIMEOUT; ctrl = er32(CTRL); ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; ew32(CTRL, ctrl); while (timeout) { if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) break; usleep_range(100, 200); timeout--; } if (!timeout) { e_dbg("Master requests are pending.\n"); return -E1000_ERR_MASTER_REQUESTS_PENDING; } return 0; } /** * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing * @hw: pointer to the HW structure * * Reset the Adaptive Interframe Spacing throttle to default values. **/ void e1000e_reset_adaptive(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; if (!mac->adaptive_ifs) { e_dbg("Not in Adaptive IFS mode!\n"); return; } mac->current_ifs_val = 0; mac->ifs_min_val = IFS_MIN; mac->ifs_max_val = IFS_MAX; mac->ifs_step_size = IFS_STEP; mac->ifs_ratio = IFS_RATIO; mac->in_ifs_mode = false; ew32(AIT, 0); } /** * e1000e_update_adaptive - Update Adaptive Interframe Spacing * @hw: pointer to the HW structure * * Update the Adaptive Interframe Spacing Throttle value based on the * time between transmitted packets and time between collisions. 
**/ void e1000e_update_adaptive(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; if (!mac->adaptive_ifs) { e_dbg("Not in Adaptive IFS mode!\n"); return; } if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { if (mac->tx_packet_delta > MIN_NUM_XMITS) { mac->in_ifs_mode = true; if (mac->current_ifs_val < mac->ifs_max_val) { if (!mac->current_ifs_val) mac->current_ifs_val = mac->ifs_min_val; else mac->current_ifs_val += mac->ifs_step_size; ew32(AIT, mac->current_ifs_val); } } } else { if (mac->in_ifs_mode && (mac->tx_packet_delta <= MIN_NUM_XMITS)) { mac->current_ifs_val = 0; mac->in_ifs_mode = false; ew32(AIT, 0); } } }
linux-master
drivers/net/ethernet/intel/e1000e/mac.c
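Editor's note: the e1000_hash_mc_addr() comment in the mac.c excerpt above walks through a worked example: destination MAC 01:AA:00:12:34:56 with a 128-register MTA (a 4096-bit vector, so a 0xFFF mask) hashes to 0x563 for mc_filter_type 0. The standalone sketch below is a user-space illustration rather than driver code (all names are hypothetical); it reproduces that calculation and also derives the register/bit pair that e1000e_update_mc_addr_list_generic() would set in the MTA shadow.

/* Standalone sketch of the multicast hash described above, assuming a
 * 128-register MTA and mc_filter_type 0 as in the worked example.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t demo_hash_mc_addr(const uint8_t *mc_addr,
				  unsigned int mta_reg_count,
				  unsigned int mc_filter_type)
{
	uint32_t hash_mask = (mta_reg_count * 32) - 1;   /* 0xFFF for 128 regs */
	unsigned int bit_shift = 0;

	/* Number of left shifts until 0xFF still fits under the mask. */
	while ((hash_mask >> bit_shift) != 0xFF)
		bit_shift++;

	switch (mc_filter_type) {
	case 1: bit_shift += 1; break;
	case 2: bit_shift += 2; break;
	case 3: bit_shift += 4; break;
	default: break;                                  /* type 0: no change */
	}

	return hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
			    ((uint16_t)mc_addr[5] << bit_shift));
}

int main(void)
{
	/* Destination MAC from the worked example: 01:AA:00:12:34:56 */
	const uint8_t mc[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
	uint32_t hash = demo_hash_mc_addr(mc, 128, 0);

	/* reg = hash >> 5 (masked to the register count), bit = hash & 0x1F,
	 * mirroring how the driver picks the MTA word and bit to set.
	 */
	printf("hash=0x%03X reg=%u bit=%u\n",
	       (unsigned)hash,
	       (unsigned)((hash >> 5) & 127),
	       (unsigned)(hash & 0x1F));
	return 0;
}

Run as-is, this should print hash=0x563 reg=43 bit=3, matching the case-0 value given in the comment's example table.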
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ /* ethtool support for e1000 */ #include <linux/netdevice.h> #include <linux/interrupt.h> #include <linux/ethtool.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/vmalloc.h> #include <linux/pm_runtime.h> #include "e1000.h" enum { NETDEV_STATS, E1000_STATS }; struct e1000_stats { char stat_string[ETH_GSTRING_LEN]; int type; int sizeof_stat; int stat_offset; }; static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = { #define E1000E_PRIV_FLAGS_S0IX_ENABLED BIT(0) "s0ix-enabled", }; #define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings) #define E1000_STAT(str, m) { \ .stat_string = str, \ .type = E1000_STATS, \ .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \ .stat_offset = offsetof(struct e1000_adapter, m) } #define E1000_NETDEV_STAT(str, m) { \ .stat_string = str, \ .type = NETDEV_STATS, \ .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \ .stat_offset = offsetof(struct rtnl_link_stats64, m) } static const struct e1000_stats e1000_gstrings_stats[] = { E1000_STAT("rx_packets", stats.gprc), E1000_STAT("tx_packets", stats.gptc), E1000_STAT("rx_bytes", stats.gorc), E1000_STAT("tx_bytes", stats.gotc), E1000_STAT("rx_broadcast", stats.bprc), E1000_STAT("tx_broadcast", stats.bptc), E1000_STAT("rx_multicast", stats.mprc), E1000_STAT("tx_multicast", stats.mptc), E1000_NETDEV_STAT("rx_errors", rx_errors), E1000_NETDEV_STAT("tx_errors", tx_errors), E1000_NETDEV_STAT("tx_dropped", tx_dropped), E1000_STAT("multicast", stats.mprc), E1000_STAT("collisions", stats.colc), E1000_NETDEV_STAT("rx_length_errors", rx_length_errors), E1000_NETDEV_STAT("rx_over_errors", rx_over_errors), E1000_STAT("rx_crc_errors", stats.crcerrs), E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors), E1000_STAT("rx_no_buffer_count", stats.rnbc), E1000_STAT("rx_missed_errors", stats.mpc), E1000_STAT("tx_aborted_errors", stats.ecol), E1000_STAT("tx_carrier_errors", stats.tncrs), E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors), E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors), E1000_STAT("tx_window_errors", stats.latecol), E1000_STAT("tx_abort_late_coll", stats.latecol), E1000_STAT("tx_deferred_ok", stats.dc), E1000_STAT("tx_single_coll_ok", stats.scc), E1000_STAT("tx_multi_coll_ok", stats.mcc), E1000_STAT("tx_timeout_count", tx_timeout_count), E1000_STAT("tx_restart_queue", restart_queue), E1000_STAT("rx_long_length_errors", stats.roc), E1000_STAT("rx_short_length_errors", stats.ruc), E1000_STAT("rx_align_errors", stats.algnerrc), E1000_STAT("tx_tcp_seg_good", stats.tsctc), E1000_STAT("tx_tcp_seg_failed", stats.tsctfc), E1000_STAT("rx_flow_control_xon", stats.xonrxc), E1000_STAT("rx_flow_control_xoff", stats.xoffrxc), E1000_STAT("tx_flow_control_xon", stats.xontxc), E1000_STAT("tx_flow_control_xoff", stats.xofftxc), E1000_STAT("rx_csum_offload_good", hw_csum_good), E1000_STAT("rx_csum_offload_errors", hw_csum_err), E1000_STAT("rx_header_split", rx_hdr_split), E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), E1000_STAT("tx_smbus", stats.mgptc), E1000_STAT("rx_smbus", stats.mgprc), E1000_STAT("dropped_smbus", stats.mgpdc), E1000_STAT("rx_dma_failed", rx_dma_failed), E1000_STAT("tx_dma_failed", tx_dma_failed), E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), E1000_STAT("uncorr_ecc_errors", uncorr_errors), E1000_STAT("corr_ecc_errors", corr_errors), E1000_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), 
E1000_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), }; #define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) #define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN) static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Eeprom test (offline)", "Interrupt test (offline)", "Loopback test (offline)", "Link test (on/offline)" }; #define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) static int e1000_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { u32 speed, supported, advertising, lp_advertising, lpa_t; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; if (hw->phy.media_type == e1000_media_type_copper) { supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_Asym_Pause | SUPPORTED_Autoneg | SUPPORTED_Pause | SUPPORTED_TP); if (hw->phy.type == e1000_phy_ife) supported &= ~SUPPORTED_1000baseT_Full; advertising = ADVERTISED_TP; if (hw->mac.autoneg == 1) { advertising |= ADVERTISED_Autoneg; /* the e1000 autoneg seems to match ethtool nicely */ advertising |= hw->phy.autoneg_advertised; } cmd->base.port = PORT_TP; cmd->base.phy_address = hw->phy.addr; } else { supported = (SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE | SUPPORTED_Autoneg); advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_FIBRE | ADVERTISED_Autoneg); cmd->base.port = PORT_FIBRE; } speed = SPEED_UNKNOWN; cmd->base.duplex = DUPLEX_UNKNOWN; if (netif_running(netdev)) { if (netif_carrier_ok(netdev)) { speed = adapter->link_speed; cmd->base.duplex = adapter->link_duplex - 1; } } else if (!pm_runtime_suspended(netdev->dev.parent)) { u32 status = er32(STATUS); if (status & E1000_STATUS_LU) { if (status & E1000_STATUS_SPEED_1000) speed = SPEED_1000; else if (status & E1000_STATUS_SPEED_100) speed = SPEED_100; else speed = SPEED_10; if (status & E1000_STATUS_FD) cmd->base.duplex = DUPLEX_FULL; else cmd->base.duplex = DUPLEX_HALF; } } cmd->base.speed = speed; cmd->base.autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; /* MDI-X => 2; MDI =>1; Invalid =>0 */ if ((hw->phy.media_type == e1000_media_type_copper) && netif_carrier_ok(netdev)) cmd->base.eth_tp_mdix = hw->phy.is_mdix ? 
ETH_TP_MDI_X : ETH_TP_MDI; else cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; if (hw->phy.mdix == AUTO_ALL_MODES) cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; else cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; if (hw->phy.media_type != e1000_media_type_copper) cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; lpa_t = mii_stat1000_to_ethtool_lpa_t(adapter->phy_regs.stat1000); lp_advertising = lpa_t | mii_lpa_to_ethtool_lpa_t(adapter->phy_regs.lpa); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, lp_advertising); return 0; } static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) { struct e1000_mac_info *mac = &adapter->hw.mac; mac->autoneg = 0; /* Make sure dplx is at most 1 bit and lsb of speed is not set * for the switch() below to work */ if ((spd & 1) || (dplx & ~1)) goto err_inval; /* Fiber NICs only allow 1000 gbps Full duplex */ if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && (spd != SPEED_1000) && (dplx != DUPLEX_FULL)) { goto err_inval; } switch (spd + dplx) { case SPEED_10 + DUPLEX_HALF: mac->forced_speed_duplex = ADVERTISE_10_HALF; break; case SPEED_10 + DUPLEX_FULL: mac->forced_speed_duplex = ADVERTISE_10_FULL; break; case SPEED_100 + DUPLEX_HALF: mac->forced_speed_duplex = ADVERTISE_100_HALF; break; case SPEED_100 + DUPLEX_FULL: mac->forced_speed_duplex = ADVERTISE_100_FULL; break; case SPEED_1000 + DUPLEX_FULL: if (adapter->hw.phy.media_type == e1000_media_type_copper) { mac->autoneg = 1; adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; } else { mac->forced_speed_duplex = ADVERTISE_1000_FULL; } break; case SPEED_1000 + DUPLEX_HALF: /* not supported */ default: goto err_inval; } /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ adapter->hw.phy.mdix = AUTO_ALL_MODES; return 0; err_inval: e_err("Unsupported Speed/Duplex configuration\n"); return -EINVAL; } static int e1000_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int ret_val = 0; u32 advertising; ethtool_convert_link_mode_to_legacy_u32(&advertising, cmd->link_modes.advertising); pm_runtime_get_sync(netdev->dev.parent); /* When SoL/IDER sessions are active, autoneg/speed/duplex * cannot be changed */ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) { e_err("Cannot change link characteristics when SoL/IDER is active.\n"); ret_val = -EINVAL; goto out; } /* MDI setting is only allowed when autoneg enabled because * some hardware doesn't allow MDI setting when speed or * duplex is forced. 
*/ if (cmd->base.eth_tp_mdix_ctrl) { if (hw->phy.media_type != e1000_media_type_copper) { ret_val = -EOPNOTSUPP; goto out; } if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && (cmd->base.autoneg != AUTONEG_ENABLE)) { e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); ret_val = -EINVAL; goto out; } } while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) usleep_range(1000, 2000); if (cmd->base.autoneg == AUTONEG_ENABLE) { hw->mac.autoneg = 1; if (hw->phy.media_type == e1000_media_type_fiber) hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | ADVERTISED_FIBRE | ADVERTISED_Autoneg; else hw->phy.autoneg_advertised = advertising | ADVERTISED_TP | ADVERTISED_Autoneg; advertising = hw->phy.autoneg_advertised; if (adapter->fc_autoneg) hw->fc.requested_mode = e1000_fc_default; } else { u32 speed = cmd->base.speed; /* calling this overrides forced MDI setting */ if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) { ret_val = -EINVAL; goto out; } } /* MDI-X => 2; MDI => 1; Auto => 3 */ if (cmd->base.eth_tp_mdix_ctrl) { /* fix up the value for auto (3 => 0) as zero is mapped * internally to auto */ if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) hw->phy.mdix = AUTO_ALL_MODES; else hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; } /* reset the link */ if (netif_running(adapter->netdev)) { e1000e_down(adapter, true); e1000e_up(adapter); } else { e1000e_reset(adapter); } out: pm_runtime_put_sync(netdev->dev.parent); clear_bit(__E1000_RESETTING, &adapter->state); return ret_val; } static void e1000_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; pause->autoneg = (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); if (hw->fc.current_mode == e1000_fc_rx_pause) { pause->rx_pause = 1; } else if (hw->fc.current_mode == e1000_fc_tx_pause) { pause->tx_pause = 1; } else if (hw->fc.current_mode == e1000_fc_full) { pause->rx_pause = 1; pause->tx_pause = 1; } } static int e1000_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int retval = 0; adapter->fc_autoneg = pause->autoneg; while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) usleep_range(1000, 2000); pm_runtime_get_sync(netdev->dev.parent); if (adapter->fc_autoneg == AUTONEG_ENABLE) { hw->fc.requested_mode = e1000_fc_default; if (netif_running(adapter->netdev)) { e1000e_down(adapter, true); e1000e_up(adapter); } else { e1000e_reset(adapter); } } else { if (pause->rx_pause && pause->tx_pause) hw->fc.requested_mode = e1000_fc_full; else if (pause->rx_pause && !pause->tx_pause) hw->fc.requested_mode = e1000_fc_rx_pause; else if (!pause->rx_pause && pause->tx_pause) hw->fc.requested_mode = e1000_fc_tx_pause; else if (!pause->rx_pause && !pause->tx_pause) hw->fc.requested_mode = e1000_fc_none; hw->fc.current_mode = hw->fc.requested_mode; if (hw->phy.media_type == e1000_media_type_fiber) { retval = hw->mac.ops.setup_link(hw); /* implicit goto out */ } else { retval = e1000e_force_mac_fc(hw); if (retval) goto out; e1000e_set_fc_watermarks(hw); } } out: pm_runtime_put_sync(netdev->dev.parent); clear_bit(__E1000_RESETTING, &adapter->state); return retval; } static u32 e1000_get_msglevel(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); return adapter->msg_enable; } static void e1000_set_msglevel(struct net_device *netdev, u32 
data) { struct e1000_adapter *adapter = netdev_priv(netdev); adapter->msg_enable = data; } static int e1000_get_regs_len(struct net_device __always_unused *netdev) { #define E1000_REGS_LEN 32 /* overestimate */ return E1000_REGS_LEN * sizeof(u32); } static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 *regs_buff = p; u16 phy_data; pm_runtime_get_sync(netdev->dev.parent); memset(p, 0, E1000_REGS_LEN * sizeof(u32)); regs->version = (1u << 24) | (adapter->pdev->revision << 16) | adapter->pdev->device; regs_buff[0] = er32(CTRL); regs_buff[1] = er32(STATUS); regs_buff[2] = er32(RCTL); regs_buff[3] = er32(RDLEN(0)); regs_buff[4] = er32(RDH(0)); regs_buff[5] = er32(RDT(0)); regs_buff[6] = er32(RDTR); regs_buff[7] = er32(TCTL); regs_buff[8] = er32(TDLEN(0)); regs_buff[9] = er32(TDH(0)); regs_buff[10] = er32(TDT(0)); regs_buff[11] = er32(TIDV); regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ /* ethtool doesn't use anything past this point, so all this * code is likely legacy junk for apps that may or may not exist */ if (hw->phy.type == e1000_phy_m88) { e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); regs_buff[13] = (u32)phy_data; /* cable length */ regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ regs_buff[18] = regs_buff[13]; /* cable polarity */ regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ regs_buff[20] = regs_buff[17]; /* polarity correction */ /* phy receive errors */ regs_buff[22] = adapter->phy_stats.receive_errors; regs_buff[23] = regs_buff[13]; /* mdix mode */ } regs_buff[21] = 0; /* was idle_errors */ e1e_rphy(hw, MII_STAT1000, &phy_data); regs_buff[24] = (u32)phy_data; /* phy local receiver status */ regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ pm_runtime_put_sync(netdev->dev.parent); } static int e1000_get_eeprom_len(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); return adapter->hw.nvm.word_size * 2; } static int e1000_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u16 *eeprom_buff; int first_word; int last_word; int ret_val = 0; u16 i; if (eeprom->len == 0) return -EINVAL; eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16); first_word = eeprom->offset >> 1; last_word = (eeprom->offset + eeprom->len - 1) >> 1; eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), GFP_KERNEL); if (!eeprom_buff) return -ENOMEM; pm_runtime_get_sync(netdev->dev.parent); if (hw->nvm.type == e1000_nvm_eeprom_spi) { ret_val = e1000_read_nvm(hw, first_word, last_word - first_word + 1, eeprom_buff); } else { for (i = 0; i < last_word - first_word + 1; i++) { ret_val = e1000_read_nvm(hw, first_word + i, 1, &eeprom_buff[i]); if (ret_val) break; } } pm_runtime_put_sync(netdev->dev.parent); if (ret_val) { /* a read error occurred, throw away the result */ memset(eeprom_buff, 0xff, sizeof(u16) * (last_word - first_word + 1)); } else { /* Device's eeprom is always little-endian, word addressable */ for (i = 0; i < last_word - first_word + 1; i++) le16_to_cpus(&eeprom_buff[i]); } memcpy(bytes, (u8 
*)eeprom_buff + (eeprom->offset & 1), eeprom->len); kfree(eeprom_buff); return ret_val; } static int e1000_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u16 *eeprom_buff; void *ptr; int max_len; int first_word; int last_word; int ret_val = 0; u16 i; if (eeprom->len == 0) return -EOPNOTSUPP; if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16))) return -EFAULT; if (adapter->flags & FLAG_READ_ONLY_NVM) return -EINVAL; max_len = hw->nvm.word_size * 2; first_word = eeprom->offset >> 1; last_word = (eeprom->offset + eeprom->len - 1) >> 1; eeprom_buff = kmalloc(max_len, GFP_KERNEL); if (!eeprom_buff) return -ENOMEM; ptr = (void *)eeprom_buff; pm_runtime_get_sync(netdev->dev.parent); if (eeprom->offset & 1) { /* need read/modify/write of first changed EEPROM word */ /* only the second byte of the word is being modified */ ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]); ptr++; } if (((eeprom->offset + eeprom->len) & 1) && (!ret_val)) /* need read/modify/write of last changed EEPROM word */ /* only the first byte of the word is being modified */ ret_val = e1000_read_nvm(hw, last_word, 1, &eeprom_buff[last_word - first_word]); if (ret_val) goto out; /* Device's eeprom is always little-endian, word addressable */ for (i = 0; i < last_word - first_word + 1; i++) le16_to_cpus(&eeprom_buff[i]); memcpy(ptr, bytes, eeprom->len); for (i = 0; i < last_word - first_word + 1; i++) cpu_to_le16s(&eeprom_buff[i]); ret_val = e1000_write_nvm(hw, first_word, last_word - first_word + 1, eeprom_buff); if (ret_val) goto out; /* Update the checksum over the first part of the EEPROM if needed * and flush shadow RAM for applicable controllers */ if ((first_word <= NVM_CHECKSUM_REG) || (hw->mac.type == e1000_82583) || (hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82573)) ret_val = e1000e_update_nvm_checksum(hw); out: pm_runtime_put_sync(netdev->dev.parent); kfree(eeprom_buff); return ret_val; } static void e1000_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct e1000_adapter *adapter = netdev_priv(netdev); strscpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver)); /* EEPROM image version # is reported as firmware version # for * PCI-E controllers */ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d-%d", (adapter->eeprom_vers & 0xF000) >> 12, (adapter->eeprom_vers & 0x0FF0) >> 4, (adapter->eeprom_vers & 0x000F)); strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); } static void e1000_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); ring->rx_max_pending = E1000_MAX_RXD; ring->tx_max_pending = E1000_MAX_TXD; ring->rx_pending = adapter->rx_ring_count; ring->tx_pending = adapter->tx_ring_count; } static int e1000_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_ring *temp_tx = NULL, *temp_rx = NULL; int err = 0, size = sizeof(struct e1000_ring); bool set_tx = false, set_rx = false; u16 new_rx_count, new_tx_count; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; new_rx_count = clamp_t(u32, ring->rx_pending, E1000_MIN_RXD, 
E1000_MAX_RXD); new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); new_tx_count = clamp_t(u32, ring->tx_pending, E1000_MIN_TXD, E1000_MAX_TXD); new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); if ((new_tx_count == adapter->tx_ring_count) && (new_rx_count == adapter->rx_ring_count)) /* nothing to do */ return 0; while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) usleep_range(1000, 2000); if (!netif_running(adapter->netdev)) { /* Set counts now and allocate resources during open() */ adapter->tx_ring->count = new_tx_count; adapter->rx_ring->count = new_rx_count; adapter->tx_ring_count = new_tx_count; adapter->rx_ring_count = new_rx_count; goto clear_reset; } set_tx = (new_tx_count != adapter->tx_ring_count); set_rx = (new_rx_count != adapter->rx_ring_count); /* Allocate temporary storage for ring updates */ if (set_tx) { temp_tx = vmalloc(size); if (!temp_tx) { err = -ENOMEM; goto free_temp; } } if (set_rx) { temp_rx = vmalloc(size); if (!temp_rx) { err = -ENOMEM; goto free_temp; } } pm_runtime_get_sync(netdev->dev.parent); e1000e_down(adapter, true); /* We can't just free everything and then setup again, because the * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring * structs. First, attempt to allocate new resources... */ if (set_tx) { memcpy(temp_tx, adapter->tx_ring, size); temp_tx->count = new_tx_count; err = e1000e_setup_tx_resources(temp_tx); if (err) goto err_setup; } if (set_rx) { memcpy(temp_rx, adapter->rx_ring, size); temp_rx->count = new_rx_count; err = e1000e_setup_rx_resources(temp_rx); if (err) goto err_setup_rx; } /* ...then free the old resources and copy back any new ring data */ if (set_tx) { e1000e_free_tx_resources(adapter->tx_ring); memcpy(adapter->tx_ring, temp_tx, size); adapter->tx_ring_count = new_tx_count; } if (set_rx) { e1000e_free_rx_resources(adapter->rx_ring); memcpy(adapter->rx_ring, temp_rx, size); adapter->rx_ring_count = new_rx_count; } err_setup_rx: if (err && set_tx) e1000e_free_tx_resources(temp_tx); err_setup: e1000e_up(adapter); pm_runtime_put_sync(netdev->dev.parent); free_temp: vfree(temp_tx); vfree(temp_rx); clear_reset: clear_bit(__E1000_RESETTING, &adapter->state); return err; } static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, int offset, u32 mask, u32 write) { u32 pat, val; static const u32 test[] = { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF }; for (pat = 0; pat < ARRAY_SIZE(test); pat++) { E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, (test[pat] & write)); val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); if (val != (test[pat] & write & mask)) { e_err("pattern test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n", reg + (offset << 2), val, (test[pat] & write & mask)); *data = reg; return true; } } return false; } static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, u32 mask, u32 write) { u32 val; __ew32(&adapter->hw, reg, write & mask); val = __er32(&adapter->hw, reg); if ((write & mask) != (val & mask)) { e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n", reg, (val & mask), (write & mask)); *data = reg; return true; } return false; } #define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \ do { \ if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \ return 1; \ } while (0) #define REG_PATTERN_TEST(reg, mask, write) \ REG_PATTERN_TEST_ARRAY(reg, 0, mask, write) #define REG_SET_AND_CHECK(reg, mask, write) \ do { \ if (reg_set_and_check(adapter, data, reg, mask, write)) \ return 1; \ } 
while (0) static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) { struct e1000_hw *hw = &adapter->hw; struct e1000_mac_info *mac = &adapter->hw.mac; u32 value; u32 before; u32 after; u32 i; u32 toggle; u32 mask; u32 wlock_mac = 0; /* The status register is Read Only, so a write should fail. * Some bits that get toggled are ignored. There are several bits * on newer hardware that are r/w. */ switch (mac->type) { case e1000_82571: case e1000_82572: case e1000_80003es2lan: toggle = 0x7FFFF3FF; break; default: toggle = 0x7FFFF033; break; } before = er32(STATUS); value = (er32(STATUS) & toggle); ew32(STATUS, toggle); after = er32(STATUS) & toggle; if (value != after) { e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n", after, value); *data = 1; return 1; } /* restore previous status */ ew32(STATUS, before); if (!(adapter->flags & FLAG_IS_ICH)) { REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF); } REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF); REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF); REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF); REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF); REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE); REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF); if (!(adapter->flags & FLAG_IS_ICH)) REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); mask = 0x8003FFFF; switch (mac->type) { case e1000_ich10lan: case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: mask |= BIT(18); break; default: break; } if (mac->type >= e1000_pch_lpt) wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >> E1000_FWSM_WLOCK_MAC_SHIFT; for (i = 0; i < mac->rar_entry_count; i++) { if (mac->type >= e1000_pch_lpt) { /* Cannot test write-protected SHRAL[n] registers */ if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac))) continue; /* SHRAH[9] different than the others */ if (i == 10) mask |= BIT(30); else mask &= ~BIT(30); } if (mac->type == e1000_pch2lan) { /* SHRAH[0,1,2] different than previous */ if (i == 1) mask &= 0xFFF4FFFF; /* SHRAH[3] different than SHRAH[0,1,2] */ if (i == 4) mask |= BIT(30); /* RAR[1-6] owned by management engine - skipping */ if (i > 0) i += 6; } REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask, 0xFFFFFFFF); /* reset index to actual value */ if ((mac->type == e1000_pch2lan) && (i > 6)) i -= 6; } for (i = 0; i < mac->mta_reg_count; i++) REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); *data = 0; return 0; } static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) { u16 
temp; u16 checksum = 0; u16 i; *data = 0; /* Read and add up the contents of the EEPROM */ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) { *data = 1; return *data; } checksum += temp; } /* If Checksum is not Correct return error else test passed */ if ((checksum != (u16)NVM_SUM) && !(*data)) *data = 2; return *data; } static irqreturn_t e1000_test_intr(int __always_unused irq, void *data) { struct net_device *netdev = (struct net_device *)data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; adapter->test_icr |= er32(ICR); return IRQ_HANDLED; } static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; u32 mask; u32 shared_int = 1; u32 irq = adapter->pdev->irq; int i; int ret_val = 0; int int_mode = E1000E_INT_MODE_LEGACY; *data = 0; /* NOTE: we don't test MSI/MSI-X interrupts here, yet */ if (adapter->int_mode == E1000E_INT_MODE_MSIX) { int_mode = adapter->int_mode; e1000e_reset_interrupt_capability(adapter); adapter->int_mode = E1000E_INT_MODE_LEGACY; e1000e_set_interrupt_capability(adapter); } /* Hook up test interrupt handler just for this test */ if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, netdev)) { shared_int = 0; } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, netdev->name, netdev)) { *data = 1; ret_val = -1; goto out; } e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared")); /* Disable all the interrupts */ ew32(IMC, 0xFFFFFFFF); e1e_flush(); usleep_range(10000, 11000); /* Test each interrupt */ for (i = 0; i < 10; i++) { /* Interrupt to test */ mask = BIT(i); if (adapter->flags & FLAG_IS_ICH) { switch (mask) { case E1000_ICR_RXSEQ: continue; case 0x00000100: if (adapter->hw.mac.type == e1000_ich8lan || adapter->hw.mac.type == e1000_ich9lan) continue; break; default: break; } } if (!shared_int) { /* Disable the interrupt to be reported in * the cause register and then force the same * interrupt and see if one gets posted. If * an interrupt was posted to the bus, the * test failed. */ adapter->test_icr = 0; ew32(IMC, mask); ew32(ICS, mask); e1e_flush(); usleep_range(10000, 11000); if (adapter->test_icr & mask) { *data = 3; break; } } /* Enable the interrupt to be reported in * the cause register and then force the same * interrupt and see if one gets posted. If * an interrupt was not posted to the bus, the * test failed. */ adapter->test_icr = 0; ew32(IMS, mask); ew32(ICS, mask); e1e_flush(); usleep_range(10000, 11000); if (!(adapter->test_icr & mask)) { *data = 4; break; } if (!shared_int) { /* Disable the other interrupts to be reported in * the cause register and then force the other * interrupts and see if any get posted. If * an interrupt was posted to the bus, the * test failed. 
*/ adapter->test_icr = 0; ew32(IMC, ~mask & 0x00007FFF); ew32(ICS, ~mask & 0x00007FFF); e1e_flush(); usleep_range(10000, 11000); if (adapter->test_icr) { *data = 5; break; } } } /* Disable all the interrupts */ ew32(IMC, 0xFFFFFFFF); e1e_flush(); usleep_range(10000, 11000); /* Unhook test interrupt handler */ free_irq(irq, netdev); out: if (int_mode == E1000E_INT_MODE_MSIX) { e1000e_reset_interrupt_capability(adapter); adapter->int_mode = int_mode; e1000e_set_interrupt_capability(adapter); } return ret_val; } static void e1000_free_desc_rings(struct e1000_adapter *adapter) { struct e1000_ring *tx_ring = &adapter->test_tx_ring; struct e1000_ring *rx_ring = &adapter->test_rx_ring; struct pci_dev *pdev = adapter->pdev; struct e1000_buffer *buffer_info; int i; if (tx_ring->desc && tx_ring->buffer_info) { for (i = 0; i < tx_ring->count; i++) { buffer_info = &tx_ring->buffer_info[i]; if (buffer_info->dma) dma_unmap_single(&pdev->dev, buffer_info->dma, buffer_info->length, DMA_TO_DEVICE); dev_kfree_skb(buffer_info->skb); } } if (rx_ring->desc && rx_ring->buffer_info) { for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; if (buffer_info->dma) dma_unmap_single(&pdev->dev, buffer_info->dma, 2048, DMA_FROM_DEVICE); dev_kfree_skb(buffer_info->skb); } } if (tx_ring->desc) { dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; } if (rx_ring->desc) { dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } kfree(tx_ring->buffer_info); tx_ring->buffer_info = NULL; kfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; } static int e1000_setup_desc_rings(struct e1000_adapter *adapter) { struct e1000_ring *tx_ring = &adapter->test_tx_ring; struct e1000_ring *rx_ring = &adapter->test_rx_ring; struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; u32 rctl; int i; int ret_val; /* Setup Tx descriptor ring and Tx buffers */ if (!tx_ring->count) tx_ring->count = E1000_DEFAULT_TXD; tx_ring->buffer_info = kcalloc(tx_ring->count, sizeof(struct e1000_buffer), GFP_KERNEL); if (!tx_ring->buffer_info) { ret_val = 1; goto err_nomem; } tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) { ret_val = 2; goto err_nomem; } tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF)); ew32(TDBAH(0), ((u64)tx_ring->dma >> 32)); ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc)); ew32(TDH(0), 0); ew32(TDT(0), 0); ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR | E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); for (i = 0; i < tx_ring->count; i++) { struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); struct sk_buff *skb; unsigned int skb_size = 1024; skb = alloc_skb(skb_size, GFP_KERNEL); if (!skb) { ret_val = 3; goto err_nomem; } skb_put(skb, skb_size); tx_ring->buffer_info[i].skb = skb; tx_ring->buffer_info[i].length = skb->len; tx_ring->buffer_info[i].dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, tx_ring->buffer_info[i].dma)) { ret_val = 4; goto err_nomem; } tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma); tx_desc->lower.data = cpu_to_le32(skb->len); tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS | 
E1000_TXD_CMD_RS); tx_desc->upper.data = 0; } /* Setup Rx descriptor ring and Rx buffers */ if (!rx_ring->count) rx_ring->count = E1000_DEFAULT_RXD; rx_ring->buffer_info = kcalloc(rx_ring->count, sizeof(struct e1000_buffer), GFP_KERNEL); if (!rx_ring->buffer_info) { ret_val = 5; goto err_nomem; } rx_ring->size = rx_ring->count * sizeof(union e1000_rx_desc_extended); rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) { ret_val = 6; goto err_nomem; } rx_ring->next_to_use = 0; rx_ring->next_to_clean = 0; rctl = er32(RCTL); if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) ew32(RCTL, rctl & ~E1000_RCTL_EN); ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF)); ew32(RDBAH(0), ((u64)rx_ring->dma >> 32)); ew32(RDLEN(0), rx_ring->size); ew32(RDH(0), 0); ew32(RDT(0), 0); rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | E1000_RCTL_SBP | E1000_RCTL_SECRC | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); ew32(RCTL, rctl); for (i = 0; i < rx_ring->count; i++) { union e1000_rx_desc_extended *rx_desc; struct sk_buff *skb; skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL); if (!skb) { ret_val = 7; goto err_nomem; } skb_reserve(skb, NET_IP_ALIGN); rx_ring->buffer_info[i].skb = skb; rx_ring->buffer_info[i].dma = dma_map_single(&pdev->dev, skb->data, 2048, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, rx_ring->buffer_info[i].dma)) { ret_val = 8; goto err_nomem; } rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); rx_desc->read.buffer_addr = cpu_to_le64(rx_ring->buffer_info[i].dma); memset(skb->data, 0x00, skb->len); } return 0; err_nomem: e1000_free_desc_rings(adapter); return ret_val; } static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) { /* Write out to PHY registers 29 and 30 to disable the Receiver. */ e1e_wphy(&adapter->hw, 29, 0x001F); e1e_wphy(&adapter->hw, 30, 0x8FFC); e1e_wphy(&adapter->hw, 29, 0x001A); e1e_wphy(&adapter->hw, 30, 0x8FF0); } static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl_reg = 0; u16 phy_reg = 0; s32 ret_val = 0; hw->mac.autoneg = 0; if (hw->phy.type == e1000_phy_ife) { /* force 100, set loopback */ e1e_wphy(hw, MII_BMCR, 0x6100); /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ ctrl_reg = er32(CTRL); ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ E1000_CTRL_SPD_100 |/* Force Speed to 100 */ E1000_CTRL_FD); /* Force Duplex to FULL */ ew32(CTRL, ctrl_reg); e1e_flush(); usleep_range(500, 1000); return 0; } /* Specific PHY configuration for loopback */ switch (hw->phy.type) { case e1000_phy_m88: /* Auto-MDI/MDIX Off */ e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); /* reset to update Auto-MDI/MDIX */ e1e_wphy(hw, MII_BMCR, 0x9140); /* autoneg off */ e1e_wphy(hw, MII_BMCR, 0x8140); break; case e1000_phy_gg82563: e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); break; case e1000_phy_bm: /* Set Default MAC Interface speed to 1GB */ e1e_rphy(hw, PHY_REG(2, 21), &phy_reg); phy_reg &= ~0x0007; phy_reg |= 0x006; e1e_wphy(hw, PHY_REG(2, 21), phy_reg); /* Assert SW reset for above settings to take effect */ hw->phy.ops.commit(hw); usleep_range(1000, 2000); /* Force Full Duplex */ e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C); /* Set Link Up (in force link) */ e1e_rphy(hw, PHY_REG(776, 16), &phy_reg); e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040); /* Force Link */ e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040); /* Set Early Link Enable */ e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400); break; case e1000_phy_82577: case e1000_phy_82578: /* Workaround: K1 must be disabled for stable 1Gbps operation */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) { e_err("Cannot setup 1Gbps loopback.\n"); return ret_val; } e1000_configure_k1_ich8lan(hw, false); hw->phy.ops.release(hw); break; case e1000_phy_82579: /* Disable PHY energy detect power down */ e1e_rphy(hw, PHY_REG(0, 21), &phy_reg); e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~BIT(3)); /* Disable full chip energy detect */ e1e_rphy(hw, PHY_REG(776, 18), &phy_reg); e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1); /* Enable loopback on the PHY */ e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001); break; default: break; } /* force 1000, set loopback */ e1e_wphy(hw, MII_BMCR, 0x4140); msleep(250); /* Now set up the MAC to the same speed/duplex as the PHY. */ ctrl_reg = er32(CTRL); ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ E1000_CTRL_FD); /* Force Duplex to FULL */ if (adapter->flags & FLAG_IS_ICH) ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ if (hw->phy.media_type == e1000_media_type_copper && hw->phy.type == e1000_phy_m88) { ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ } else { /* Set the ILOS bit on the fiber Nic if half duplex link is * detected. */ if ((er32(STATUS) & E1000_STATUS_FD) == 0) ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); } ew32(CTRL, ctrl_reg); /* Disable the receiver on the PHY so when a cable is plugged in, the * PHY does not begin to autoneg when a cable is reconnected to the NIC. 
*/ if (hw->phy.type == e1000_phy_m88) e1000_phy_disable_receiver(adapter); usleep_range(500, 1000); return 0; } static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl = er32(CTRL); int link; /* special requirements for 82571/82572 fiber adapters */ /* jump through hoops to make sure link is up because serdes * link is hardwired up */ ctrl |= E1000_CTRL_SLU; ew32(CTRL, ctrl); /* disable autoneg */ ctrl = er32(TXCW); ctrl &= ~BIT(31); ew32(TXCW, ctrl); link = (er32(STATUS) & E1000_STATUS_LU); if (!link) { /* set invert loss of signal */ ctrl = er32(CTRL); ctrl |= E1000_CTRL_ILOS; ew32(CTRL, ctrl); } /* special write to serdes control register to enable SerDes analog * loopback */ ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK); e1e_flush(); usleep_range(10000, 11000); return 0; } /* only call this for fiber/serdes connections to es2lan */ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrlext = er32(CTRL_EXT); u32 ctrl = er32(CTRL); /* save CTRL_EXT to restore later, reuse an empty variable (unused * on mac_type 80003es2lan) */ adapter->tx_fifo_head = ctrlext; /* clear the serdes mode bits, putting the device into mac loopback */ ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; ew32(CTRL_EXT, ctrlext); /* force speed to 1000/FD, link up */ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SPD_1000 | E1000_CTRL_FD); ew32(CTRL, ctrl); /* set mac loopback */ ctrl = er32(RCTL); ctrl |= E1000_RCTL_LBM_MAC; ew32(RCTL, ctrl); /* set testing mode parameters (no need to reset later) */ #define KMRNCTRLSTA_OPMODE (0x1F << 16) #define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 ew32(KMRNCTRLSTA, (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); return 0; } static int e1000_setup_loopback_test(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 rctl, fext_nvm11, tarc0; if (hw->mac.type >= e1000_pch_spt) { fext_nvm11 = er32(FEXTNVM11); fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; ew32(FEXTNVM11, fext_nvm11); tarc0 = er32(TARC(0)); /* clear bits 28 & 29 (control of MULR concurrent requests) */ tarc0 &= 0xcfffffff; /* set bit 29 (value of MULR requests is now 2) */ tarc0 |= 0x20000000; ew32(TARC(0), tarc0); } if (hw->phy.media_type == e1000_media_type_fiber || hw->phy.media_type == e1000_media_type_internal_serdes) { switch (hw->mac.type) { case e1000_80003es2lan: return e1000_set_es2lan_mac_loopback(adapter); case e1000_82571: case e1000_82572: return e1000_set_82571_fiber_loopback(adapter); default: rctl = er32(RCTL); rctl |= E1000_RCTL_LBM_TCVR; ew32(RCTL, rctl); return 0; } } else if (hw->phy.media_type == e1000_media_type_copper) { return e1000_integrated_phy_loopback(adapter); } return 7; } static void e1000_loopback_cleanup(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 rctl, fext_nvm11, tarc0; u16 phy_reg; rctl = er32(RCTL); rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); ew32(RCTL, rctl); switch (hw->mac.type) { case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: fext_nvm11 = er32(FEXTNVM11); fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX; ew32(FEXTNVM11, fext_nvm11); tarc0 = er32(TARC(0)); /* clear bits 28 & 29 (control of MULR concurrent requests) */ /* set bit 29 (value of MULR requests is now 0) */ tarc0 &= 0xcfffffff; 
ew32(TARC(0), tarc0); fallthrough; case e1000_80003es2lan: if (hw->phy.media_type == e1000_media_type_fiber || hw->phy.media_type == e1000_media_type_internal_serdes) { /* restore CTRL_EXT, stealing space from tx_fifo_head */ ew32(CTRL_EXT, adapter->tx_fifo_head); adapter->tx_fifo_head = 0; } fallthrough; case e1000_82571: case e1000_82572: if (hw->phy.media_type == e1000_media_type_fiber || hw->phy.media_type == e1000_media_type_internal_serdes) { ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); e1e_flush(); usleep_range(10000, 11000); break; } fallthrough; default: hw->mac.autoneg = 1; if (hw->phy.type == e1000_phy_gg82563) e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180); e1e_rphy(hw, MII_BMCR, &phy_reg); if (phy_reg & BMCR_LOOPBACK) { phy_reg &= ~BMCR_LOOPBACK; e1e_wphy(hw, MII_BMCR, phy_reg); if (hw->phy.ops.commit) hw->phy.ops.commit(hw); } break; } } static void e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) { memset(skb->data, 0xFF, frame_size); frame_size &= ~1; memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); skb->data[frame_size / 2 + 10] = 0xBE; skb->data[frame_size / 2 + 12] = 0xAF; } static int e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) { frame_size &= ~1; if (*(skb->data + 3) == 0xFF) if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && (*(skb->data + frame_size / 2 + 12) == 0xAF)) return 0; return 13; } static int e1000_run_loopback_test(struct e1000_adapter *adapter) { struct e1000_ring *tx_ring = &adapter->test_tx_ring; struct e1000_ring *rx_ring = &adapter->test_rx_ring; struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; struct e1000_buffer *buffer_info; int i, j, k, l; int lc; int good_cnt; int ret_val = 0; unsigned long time; ew32(RDT(0), rx_ring->count - 1); /* Calculate the loop count based on the largest descriptor ring * The idea is to wrap the largest ring a number of times using 64 * send/receive pairs during each loop */ if (rx_ring->count <= tx_ring->count) lc = ((tx_ring->count / 64) * 2) + 1; else lc = ((rx_ring->count / 64) * 2) + 1; k = 0; l = 0; /* loop count loop */ for (j = 0; j <= lc; j++) { /* send the packets */ for (i = 0; i < 64; i++) { buffer_info = &tx_ring->buffer_info[k]; e1000_create_lbtest_frame(buffer_info->skb, 1024); dma_sync_single_for_device(&pdev->dev, buffer_info->dma, buffer_info->length, DMA_TO_DEVICE); k++; if (k == tx_ring->count) k = 0; } ew32(TDT(0), k); e1e_flush(); msleep(200); time = jiffies; /* set the start time for the receive */ good_cnt = 0; /* receive the sent packets */ do { buffer_info = &rx_ring->buffer_info[l]; dma_sync_single_for_cpu(&pdev->dev, buffer_info->dma, 2048, DMA_FROM_DEVICE); ret_val = e1000_check_lbtest_frame(buffer_info->skb, 1024); if (!ret_val) good_cnt++; l++; if (l == rx_ring->count) l = 0; /* time + 20 msecs (200 msecs on 2.4) is more than * enough time to complete the receives, if it's * exceeded, break and error off */ } while ((good_cnt < 64) && !time_after(jiffies, time + 20)); if (good_cnt != 64) { ret_val = 13; /* ret_val is the same as mis-compare */ break; } if (time_after(jiffies, time + 20)) { ret_val = 14; /* error code for time out error */ break; } } return ret_val; } static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) { struct e1000_hw *hw = &adapter->hw; /* PHY loopback cannot be performed if SoL/IDER sessions are active */ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) { e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); *data = 0; goto 
out; } *data = e1000_setup_desc_rings(adapter); if (*data) goto out; *data = e1000_setup_loopback_test(adapter); if (*data) goto err_loopback; *data = e1000_run_loopback_test(adapter); e1000_loopback_cleanup(adapter); err_loopback: e1000_free_desc_rings(adapter); out: return *data; } static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) { struct e1000_hw *hw = &adapter->hw; *data = 0; if (hw->phy.media_type == e1000_media_type_internal_serdes) { int i = 0; hw->mac.serdes_has_link = false; /* On some blade server designs, link establishment * could take as long as 2-3 minutes */ do { hw->mac.ops.check_for_link(hw); if (hw->mac.serdes_has_link) return *data; msleep(20); } while (i++ < 3750); *data = 1; } else { hw->mac.ops.check_for_link(hw); if (hw->mac.autoneg) /* On some Phy/switch combinations, link establishment * can take a few seconds more than expected. */ msleep_interruptible(5000); if (!(er32(STATUS) & E1000_STATUS_LU)) *data = 1; } return *data; } static int e1000e_get_sset_count(struct net_device __always_unused *netdev, int sset) { switch (sset) { case ETH_SS_TEST: return E1000_TEST_LEN; case ETH_SS_STATS: return E1000_STATS_LEN; case ETH_SS_PRIV_FLAGS: return E1000E_PRIV_FLAGS_STR_LEN; default: return -EOPNOTSUPP; } } static void e1000_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { struct e1000_adapter *adapter = netdev_priv(netdev); u16 autoneg_advertised; u8 forced_speed_duplex; u8 autoneg; bool if_running = netif_running(netdev); pm_runtime_get_sync(netdev->dev.parent); set_bit(__E1000_TESTING, &adapter->state); if (!if_running) { /* Get control of and reset hardware */ if (adapter->flags & FLAG_HAS_AMT) e1000e_get_hw_control(adapter); e1000e_power_up_phy(adapter); adapter->hw.phy.autoneg_wait_to_complete = 1; e1000e_reset(adapter); adapter->hw.phy.autoneg_wait_to_complete = 0; } if (eth_test->flags == ETH_TEST_FL_OFFLINE) { /* Offline tests */ /* save speed, duplex, autoneg settings */ autoneg_advertised = adapter->hw.phy.autoneg_advertised; forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; autoneg = adapter->hw.mac.autoneg; e_info("offline testing starting\n"); if (if_running) /* indicate we're in test mode */ e1000e_close(netdev); if (e1000_reg_test(adapter, &data[0])) eth_test->flags |= ETH_TEST_FL_FAILED; e1000e_reset(adapter); if (e1000_eeprom_test(adapter, &data[1])) eth_test->flags |= ETH_TEST_FL_FAILED; e1000e_reset(adapter); if (e1000_intr_test(adapter, &data[2])) eth_test->flags |= ETH_TEST_FL_FAILED; e1000e_reset(adapter); if (e1000_loopback_test(adapter, &data[3])) eth_test->flags |= ETH_TEST_FL_FAILED; /* force this routine to wait until autoneg complete/timeout */ adapter->hw.phy.autoneg_wait_to_complete = 1; e1000e_reset(adapter); adapter->hw.phy.autoneg_wait_to_complete = 0; if (e1000_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; /* restore speed, duplex, autoneg settings */ adapter->hw.phy.autoneg_advertised = autoneg_advertised; adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; adapter->hw.mac.autoneg = autoneg; e1000e_reset(adapter); clear_bit(__E1000_TESTING, &adapter->state); if (if_running) e1000e_open(netdev); } else { /* Online tests */ e_info("online testing starting\n"); /* register, eeprom, intr and loopback tests not run online */ data[0] = 0; data[1] = 0; data[2] = 0; data[3] = 0; if (e1000_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; clear_bit(__E1000_TESTING, &adapter->state); } if (!if_running) { e1000e_reset(adapter); if 
(adapter->flags & FLAG_HAS_AMT) e1000e_release_hw_control(adapter); } msleep_interruptible(4 * 1000); pm_runtime_put_sync(netdev->dev.parent); } static void e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct e1000_adapter *adapter = netdev_priv(netdev); wol->supported = 0; wol->wolopts = 0; if (!(adapter->flags & FLAG_HAS_WOL) || !device_can_wakeup(&adapter->pdev->dev)) return; wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC | WAKE_PHY; /* apply any specific unsupported masks here */ if (adapter->flags & FLAG_NO_WAKE_UCAST) { wol->supported &= ~WAKE_UCAST; if (adapter->wol & E1000_WUFC_EX) e_err("Interface does not support directed (unicast) frame wake-up packets\n"); } if (adapter->wol & E1000_WUFC_EX) wol->wolopts |= WAKE_UCAST; if (adapter->wol & E1000_WUFC_MC) wol->wolopts |= WAKE_MCAST; if (adapter->wol & E1000_WUFC_BC) wol->wolopts |= WAKE_BCAST; if (adapter->wol & E1000_WUFC_MAG) wol->wolopts |= WAKE_MAGIC; if (adapter->wol & E1000_WUFC_LNKC) wol->wolopts |= WAKE_PHY; } static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct e1000_adapter *adapter = netdev_priv(netdev); if (!(adapter->flags & FLAG_HAS_WOL) || !device_can_wakeup(&adapter->pdev->dev) || (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC | WAKE_PHY))) return -EOPNOTSUPP; /* these settings will always override what we currently have */ adapter->wol = 0; if (wol->wolopts & WAKE_UCAST) adapter->wol |= E1000_WUFC_EX; if (wol->wolopts & WAKE_MCAST) adapter->wol |= E1000_WUFC_MC; if (wol->wolopts & WAKE_BCAST) adapter->wol |= E1000_WUFC_BC; if (wol->wolopts & WAKE_MAGIC) adapter->wol |= E1000_WUFC_MAG; if (wol->wolopts & WAKE_PHY) adapter->wol |= E1000_WUFC_LNKC; device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); return 0; } static int e1000_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; switch (state) { case ETHTOOL_ID_ACTIVE: pm_runtime_get_sync(netdev->dev.parent); if (!hw->mac.ops.blink_led) return 2; /* cycle on/off twice per second */ hw->mac.ops.blink_led(hw); break; case ETHTOOL_ID_INACTIVE: if (hw->phy.type == e1000_phy_ife) e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); hw->mac.ops.led_off(hw); hw->mac.ops.cleanup_led(hw); pm_runtime_put_sync(netdev->dev.parent); break; case ETHTOOL_ID_ON: hw->mac.ops.led_on(hw); break; case ETHTOOL_ID_OFF: hw->mac.ops.led_off(hw); break; } return 0; } static int e1000_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); if (adapter->itr_setting <= 4) ec->rx_coalesce_usecs = adapter->itr_setting; else ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; return 0; } static int e1000_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || ((ec->rx_coalesce_usecs > 4) && (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || (ec->rx_coalesce_usecs == 2)) return -EINVAL; if (ec->rx_coalesce_usecs == 4) { adapter->itr_setting = 4; adapter->itr = adapter->itr_setting; } else if (ec->rx_coalesce_usecs <= 3) { adapter->itr = 20000; adapter->itr_setting = ec->rx_coalesce_usecs; } else { adapter->itr = (1000000 / 
ec->rx_coalesce_usecs); adapter->itr_setting = adapter->itr & ~3; } pm_runtime_get_sync(netdev->dev.parent); if (adapter->itr_setting != 0) e1000e_write_itr(adapter, adapter->itr); else e1000e_write_itr(adapter, 0); pm_runtime_put_sync(netdev->dev.parent); return 0; } static int e1000_nway_reset(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); if (!netif_running(netdev)) return -EAGAIN; if (!adapter->hw.mac.autoneg) return -EINVAL; pm_runtime_get_sync(netdev->dev.parent); e1000e_reinit_locked(adapter); pm_runtime_put_sync(netdev->dev.parent); return 0; } static void e1000_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats __always_unused *stats, u64 *data) { struct e1000_adapter *adapter = netdev_priv(netdev); struct rtnl_link_stats64 net_stats; int i; char *p = NULL; pm_runtime_get_sync(netdev->dev.parent); dev_get_stats(netdev, &net_stats); pm_runtime_put_sync(netdev->dev.parent); for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { switch (e1000_gstrings_stats[i].type) { case NETDEV_STATS: p = (char *)&net_stats + e1000_gstrings_stats[i].stat_offset; break; case E1000_STATS: p = (char *)adapter + e1000_gstrings_stats[i].stat_offset; break; default: data[i] = 0; continue; } data[i] = (e1000_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } } static void e1000_get_strings(struct net_device __always_unused *netdev, u32 stringset, u8 *data) { u8 *p = data; int i; switch (stringset) { case ETH_SS_TEST: memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test)); break; case ETH_SS_STATS: for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { memcpy(p, e1000_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } break; case ETH_SS_PRIV_FLAGS: memcpy(data, e1000e_priv_flags_strings, E1000E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); break; } } static int e1000_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, u32 __always_unused *rule_locs) { info->data = 0; switch (info->cmd) { case ETHTOOL_GRXFH: { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 mrqc; pm_runtime_get_sync(netdev->dev.parent); mrqc = er32(MRQC); pm_runtime_put_sync(netdev->dev.parent); if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK)) return 0; switch (info->flow_type) { case TCP_V4_FLOW: if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; fallthrough; case UDP_V4_FLOW: case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case IPV4_FLOW: if (mrqc & E1000_MRQC_RSS_FIELD_IPV4) info->data |= RXH_IP_SRC | RXH_IP_DST; break; case TCP_V6_FLOW: if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; fallthrough; case UDP_V6_FLOW: case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case IPV6_FLOW: if (mrqc & E1000_MRQC_RSS_FIELD_IPV6) info->data |= RXH_IP_SRC | RXH_IP_DST; break; default: break; } return 0; } default: return -EOPNOTSUPP; } } static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u16 cap_addr, lpa_addr, pcs_stat_addr, phy_data; u32 ret_val; if (!(adapter->flags2 & FLAG2_HAS_EEE)) return -EOPNOTSUPP; switch (hw->phy.type) { case e1000_phy_82579: cap_addr = I82579_EEE_CAPABILITY; lpa_addr = I82579_EEE_LP_ABILITY; pcs_stat_addr = I82579_EEE_PCS_STATUS; break; case e1000_phy_i217: cap_addr = I217_EEE_CAPABILITY; lpa_addr = I217_EEE_LP_ABILITY; pcs_stat_addr = I217_EEE_PCS_STATUS; break; default: return -EOPNOTSUPP; } 
pm_runtime_get_sync(netdev->dev.parent); ret_val = hw->phy.ops.acquire(hw); if (ret_val) { pm_runtime_put_sync(netdev->dev.parent); return -EBUSY; } /* EEE Capability */ ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data); if (ret_val) goto release; edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data); /* EEE Advertised */ edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); /* EEE Link Partner Advertised */ ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data); if (ret_val) goto release; edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); /* EEE PCS Status */ ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data); if (ret_val) goto release; if (hw->phy.type == e1000_phy_82579) phy_data <<= 8; /* Result of the EEE auto negotiation - there is no register that * has the status of the EEE negotiation so do a best-guess based * on whether Tx or Rx LPI indications have been received. */ if (phy_data & (E1000_EEE_TX_LPI_RCVD | E1000_EEE_RX_LPI_RCVD)) edata->eee_active = true; edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable; edata->tx_lpi_enabled = true; edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT; release: hw->phy.ops.release(hw); if (ret_val) ret_val = -ENODATA; pm_runtime_put_sync(netdev->dev.parent); return ret_val; } static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct ethtool_eee eee_curr; s32 ret_val; ret_val = e1000e_get_eee(netdev, &eee_curr); if (ret_val) return ret_val; if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { e_err("Setting EEE tx-lpi is not supported\n"); return -EINVAL; } if (eee_curr.tx_lpi_timer != edata->tx_lpi_timer) { e_err("Setting EEE Tx LPI timer is not supported\n"); return -EINVAL; } if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) { e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n"); return -EINVAL; } adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled; pm_runtime_get_sync(netdev->dev.parent); /* reset the link */ if (netif_running(netdev)) e1000e_reinit_locked(adapter); else e1000e_reset(adapter); pm_runtime_put_sync(netdev->dev.parent); return 0; } static int e1000e_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) { struct e1000_adapter *adapter = netdev_priv(netdev); ethtool_op_get_ts_info(netdev, info); if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) return 0; info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE); info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); info->rx_filters = (BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | BIT(HWTSTAMP_FILTER_ALL)); if (adapter->ptp_clock) info->phc_index = ptp_clock_index(adapter->ptp_clock); return 0; } static u32 e1000e_get_priv_flags(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); u32 priv_flags = 0; if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS) priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED; return priv_flags; } static int 
e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags) { struct e1000_adapter *adapter = netdev_priv(netdev); unsigned int flags2 = adapter->flags2; flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS; if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) { struct e1000_hw *hw = &adapter->hw; if (hw->mac.type < e1000_pch_cnp) return -EINVAL; flags2 |= FLAG2_ENABLE_S0IX_FLOWS; } if (flags2 != adapter->flags2) adapter->flags2 = flags2; return 0; } static const struct ethtool_ops e1000_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, .get_drvinfo = e1000_get_drvinfo, .get_regs_len = e1000_get_regs_len, .get_regs = e1000_get_regs, .get_wol = e1000_get_wol, .set_wol = e1000_set_wol, .get_msglevel = e1000_get_msglevel, .set_msglevel = e1000_set_msglevel, .nway_reset = e1000_nway_reset, .get_link = ethtool_op_get_link, .get_eeprom_len = e1000_get_eeprom_len, .get_eeprom = e1000_get_eeprom, .set_eeprom = e1000_set_eeprom, .get_ringparam = e1000_get_ringparam, .set_ringparam = e1000_set_ringparam, .get_pauseparam = e1000_get_pauseparam, .set_pauseparam = e1000_set_pauseparam, .self_test = e1000_diag_test, .get_strings = e1000_get_strings, .set_phys_id = e1000_set_phys_id, .get_ethtool_stats = e1000_get_ethtool_stats, .get_sset_count = e1000e_get_sset_count, .get_coalesce = e1000_get_coalesce, .set_coalesce = e1000_set_coalesce, .get_rxnfc = e1000_get_rxnfc, .get_ts_info = e1000e_get_ts_info, .get_eee = e1000e_get_eee, .set_eee = e1000e_set_eee, .get_link_ksettings = e1000_get_link_ksettings, .set_link_ksettings = e1000_set_link_ksettings, .get_priv_flags = e1000e_get_priv_flags, .set_priv_flags = e1000e_set_priv_flags, }; void e1000e_set_ethtool_ops(struct net_device *netdev) { netdev->ethtool_ops = &e1000_ethtool_ops; }
linux-master
drivers/net/ethernet/intel/e1000e/ethtool.c
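For reference, a minimal user-space sketch (not part of the driver above) of how the five self-tests implemented by e1000_diag_test() are typically exercised through the legacy SIOCETHTOOL ioctl. Results come back in the same order as e1000_gstrings_test[]. The interface name "eth0", the hard-coded count of five results, and running with CAP_NET_ADMIN are assumptions for this example; the command ethtool -t eth0 offline does the same job.

/*
 * Illustrative only, not kernel code.  Assumes the device is named "eth0",
 * that the driver reports five test results (E1000_TEST_LEN above), and
 * that the caller has CAP_NET_ADMIN.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_test *test;
	struct ifreq ifr = { 0 };
	int fd, i;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	/* room for the request header plus one u64 result per test */
	test = calloc(1, sizeof(*test) + 5 * sizeof(__u64));
	if (!test)
		return 1;
	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE;	/* also run the offline tests */

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)test;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_TEST");
		return 1;
	}

	/* data[0..4]: register, eeprom, interrupt, loopback, link */
	for (i = 0; i < 5; i++)
		printf("test %d: %llu\n", i, (unsigned long long)test->data[i]);
	printf("%s\n", (test->flags & ETH_TEST_FL_FAILED) ? "FAILED" : "PASSED");

	free(test);
	return 0;
}

The driver side of this exchange is e1000_diag_test() above: it fills the data[] array and sets ETH_TEST_FL_FAILED in flags when any sub-test fails.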
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "e1000.h" /** * e1000_calculate_checksum - Calculate checksum for buffer * @buffer: pointer to EEPROM * @length: size of EEPROM to calculate a checksum for * * Calculates the checksum for some buffer on a specified length. The * checksum calculated is returned. **/ static u8 e1000_calculate_checksum(u8 *buffer, u32 length) { u32 i; u8 sum = 0; if (!buffer) return 0; for (i = 0; i < length; i++) sum += buffer[i]; return (u8)(0 - sum); } /** * e1000_mng_enable_host_if - Checks host interface is enabled * @hw: pointer to the HW structure * * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND * * This function checks whether the HOST IF is enabled for command operation * and also checks whether the previous command is completed. It busy waits * in case of previous command is not completed. **/ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) { u32 hicr; u8 i; if (!hw->mac.arc_subsystem_valid) { e_dbg("ARC subsystem not valid.\n"); return -E1000_ERR_HOST_INTERFACE_COMMAND; } /* Check that the host interface is enabled. */ hicr = er32(HICR); if (!(hicr & E1000_HICR_EN)) { e_dbg("E1000_HOST_EN bit disabled.\n"); return -E1000_ERR_HOST_INTERFACE_COMMAND; } /* check the previous command is completed */ for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { hicr = er32(HICR); if (!(hicr & E1000_HICR_C)) break; mdelay(1); } if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { e_dbg("Previous command timeout failed.\n"); return -E1000_ERR_HOST_INTERFACE_COMMAND; } return 0; } /** * e1000e_check_mng_mode_generic - Generic check management mode * @hw: pointer to the HW structure * * Reads the firmware semaphore register and returns true (>0) if * manageability is enabled, else false (0). **/ bool e1000e_check_mng_mode_generic(struct e1000_hw *hw) { u32 fwsm = er32(FWSM); return (fwsm & E1000_FWSM_MODE_MASK) == (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); } /** * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx * @hw: pointer to the HW structure * * Enables packet filtering on transmit packets if manageability is enabled * and host interface is enabled. **/ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) { struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; u32 *buffer = (u32 *)&hw->mng_cookie; u32 offset; s32 ret_val, hdr_csum, csum; u8 i, len; hw->mac.tx_pkt_filtering = true; /* No manageability, no filtering */ if (!hw->mac.ops.check_mng_mode(hw)) { hw->mac.tx_pkt_filtering = false; return hw->mac.tx_pkt_filtering; } /* If we can't read from the host interface for whatever * reason, disable filtering. */ ret_val = e1000_mng_enable_host_if(hw); if (ret_val) { hw->mac.tx_pkt_filtering = false; return hw->mac.tx_pkt_filtering; } /* Read in the header. Length and offset are in dwords. */ len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; for (i = 0; i < len; i++) *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i); hdr_csum = hdr->checksum; hdr->checksum = 0; csum = e1000_calculate_checksum((u8 *)hdr, E1000_MNG_DHCP_COOKIE_LENGTH); /* If either the checksums or signature don't match, then * the cookie area isn't considered valid, in which case we * take the safe route of assuming Tx filtering is enabled. */ if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { hw->mac.tx_pkt_filtering = true; return hw->mac.tx_pkt_filtering; } /* Cookie area is valid, make the final check for filtering. 
*/ if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) hw->mac.tx_pkt_filtering = false; return hw->mac.tx_pkt_filtering; } /** * e1000_mng_write_cmd_header - Writes manageability command header * @hw: pointer to the HW structure * @hdr: pointer to the host interface command header * * Writes the command header after does the checksum calculation. **/ static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, struct e1000_host_mng_command_header *hdr) { u16 i, length = sizeof(struct e1000_host_mng_command_header); /* Write the whole command header structure with new checksum. */ hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); length >>= 2; /* Write the relevant command block into the ram area. */ for (i = 0; i < length; i++) { E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, *((u32 *)hdr + i)); e1e_flush(); } return 0; } /** * e1000_mng_host_if_write - Write to the manageability host interface * @hw: pointer to the HW structure * @buffer: pointer to the host interface buffer * @length: size of the buffer * @offset: location in the buffer to write to * @sum: sum of the data (not checksum) * * This function writes the buffer content at the offset given on the host if. * It also does alignment considerations to do the writes in most efficient * way. Also fills up the sum of the buffer in *buffer parameter. **/ static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, u16 offset, u8 *sum) { u8 *tmp; u8 *bufptr = buffer; u32 data = 0; u16 remaining, i, j, prev_bytes; /* sum = only sum of the data and it is not checksum */ if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) return -E1000_ERR_PARAM; tmp = (u8 *)&data; prev_bytes = offset & 0x3; offset >>= 2; if (prev_bytes) { data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset); for (j = prev_bytes; j < sizeof(u32); j++) { *(tmp + j) = *bufptr++; *sum += *(tmp + j); } E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data); length -= j - prev_bytes; offset++; } remaining = length & 0x3; length -= remaining; /* Calculate length in DWORDs */ length >>= 2; /* The device driver writes the relevant command block into the * ram area. */ for (i = 0; i < length; i++) { for (j = 0; j < sizeof(u32); j++) { *(tmp + j) = *bufptr++; *sum += *(tmp + j); } E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); } if (remaining) { for (j = 0; j < sizeof(u32); j++) { if (j < remaining) *(tmp + j) = *bufptr++; else *(tmp + j) = 0; *sum += *(tmp + j); } E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); } return 0; } /** * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface * @hw: pointer to the HW structure * @buffer: pointer to the host interface * @length: size of the buffer * * Writes the DHCP information to the host interface. **/ s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) { struct e1000_host_mng_command_header hdr; s32 ret_val; u32 hicr; hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; hdr.command_length = length; hdr.reserved1 = 0; hdr.reserved2 = 0; hdr.checksum = 0; /* Enable the host interface */ ret_val = e1000_mng_enable_host_if(hw); if (ret_val) return ret_val; /* Populate the host interface with the contents of "buffer". */ ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr), &(hdr.checksum)); if (ret_val) return ret_val; /* Write the manageability command header */ ret_val = e1000_mng_write_cmd_header(hw, &hdr); if (ret_val) return ret_val; /* Tell the ARC a new command is pending. 
*/ hicr = er32(HICR); ew32(HICR, hicr | E1000_HICR_C); return 0; } /** * e1000e_enable_mng_pass_thru - Check if management passthrough is needed * @hw: pointer to the HW structure * * Verifies the hardware needs to leave interface enabled so that frames can * be directed to and from the management interface. **/ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) { u32 manc; u32 fwsm, factps; manc = er32(MANC); if (!(manc & E1000_MANC_RCV_TCO_EN)) return false; if (hw->mac.has_fwsm) { fwsm = er32(FWSM); factps = er32(FACTPS); if (!(factps & E1000_FACTPS_MNGCG) && ((fwsm & E1000_FWSM_MODE_MASK) == (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) return true; } else if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { u16 data; s32 ret_val; factps = er32(FACTPS); ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); if (ret_val) return false; if (!(factps & E1000_FACTPS_MNGCG) && ((data & E1000_NVM_INIT_CTRL2_MNGM) == (e1000_mng_mode_pt << 13))) return true; } else if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) { return true; } return false; }
linux-master
drivers/net/ethernet/intel/e1000e/manage.c
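The following short C sketch is not taken from the repository; it is an illustrative, hedged example of how the exported manageability helpers defined in manage.c above (e1000e_enable_mng_pass_thru, e1000e_enable_tx_pkt_filtering, e1000e_mng_write_dhcp_info) might be consumed by a caller. The wrapper function, its name, and its parameters are hypothetical; only the e1000e_* calls and the e_dbg() macro come from the driver itself.

/* Illustrative sketch only -- not part of the driver sources above.
 * It shows one plausible consumer of the public manageability helpers
 * from manage.c; the wrapper name and parameters are hypothetical.
 */
#include "e1000.h"

static void e1000e_example_mng_setup(struct e1000_hw *hw,
				     u8 *dhcp_payload, u16 payload_len)
{
	/* Leave the MAC's receive path open for the BMC when management
	 * pass-through is reported as needed.
	 */
	if (e1000e_enable_mng_pass_thru(hw))
		e_dbg("management pass-through active\n");

	/* e1000e_enable_tx_pkt_filtering() validates the DHCP cookie
	 * (signature plus two's-complement checksum, i.e. the byte sum
	 * of the cookie including its checksum field is 0 mod 256) and
	 * caches the result in hw->mac.tx_pkt_filtering.  When filtering
	 * is required, a DHCP payload can be handed to the firmware
	 * through the host interface.
	 */
	if (e1000e_enable_tx_pkt_filtering(hw) && dhcp_payload)
		e1000e_mng_write_dhcp_info(hw, dhcp_payload, payload_len);
}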
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "e1000.h" #include <linux/ethtool.h> static s32 e1000_wait_autoneg(struct e1000_hw *hw); static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data, bool read, bool page_set); static u32 e1000_get_phy_addr_for_hv_page(u32 page); static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, u16 *data, bool read); /* Cable length tables */ static const u16 e1000_m88_cable_length_table[] = { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ ARRAY_SIZE(e1000_m88_cable_length_table) static const u16 e1000_igp_2_cable_length_table[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, 124 }; #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ ARRAY_SIZE(e1000_igp_2_cable_length_table) /** * e1000e_check_reset_block_generic - Check if PHY reset is blocked * @hw: pointer to the HW structure * * Read the PHY management control register and check whether a PHY reset * is blocked. If a reset is not blocked return 0, otherwise * return E1000_BLK_PHY_RESET (12). **/ s32 e1000e_check_reset_block_generic(struct e1000_hw *hw) { u32 manc; manc = er32(MANC); return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0; } /** * e1000e_get_phy_id - Retrieve the PHY ID and revision * @hw: pointer to the HW structure * * Reads the PHY registers and stores the PHY ID and possibly the PHY * revision in the hardware structure. **/ s32 e1000e_get_phy_id(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = 0; u16 phy_id; u16 retry_count = 0; if (!phy->ops.read_reg) return 0; while (retry_count < 2) { ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id); if (ret_val) return ret_val; phy->id = (u32)(phy_id << 16); usleep_range(20, 40); ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); if (ret_val) return ret_val; phy->id |= (u32)(phy_id & PHY_REVISION_MASK); phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); if (phy->id != 0 && phy->id != PHY_REVISION_MASK) return 0; retry_count++; } return 0; } /** * e1000e_phy_reset_dsp - Reset PHY DSP * @hw: pointer to the HW structure * * Reset the digital signal processor. **/ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw) { s32 ret_val; ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); if (ret_val) return ret_val; return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0); } /** * e1000e_read_phy_reg_mdic - Read MDI control register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Reads the MDI control register in the PHY at offset and stores the * information read to data. **/ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) { struct e1000_phy_info *phy = &hw->phy; u32 i, mdic = 0; if (offset > MAX_PHY_REG_ADDRESS) { e_dbg("PHY Address %d is out of range\n", offset); return -E1000_ERR_PARAM; } /* Set up Op-code, Phy Address, and register offset in the MDI * Control register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. 
*/ mdic = ((offset << E1000_MDIC_REG_SHIFT) | (phy->addr << E1000_MDIC_PHY_SHIFT) | (E1000_MDIC_OP_READ)); ew32(MDIC, mdic); /* Poll the ready bit to see if the MDI read completed * Increasing the time out as testing showed failures with * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { udelay(50); mdic = er32(MDIC); if (mdic & E1000_MDIC_READY) break; } if (!(mdic & E1000_MDIC_READY)) { e_dbg("MDI Read PHY Reg Address %d did not complete\n", offset); return -E1000_ERR_PHY; } if (mdic & E1000_MDIC_ERROR) { e_dbg("MDI Read PHY Reg Address %d Error\n", offset); return -E1000_ERR_PHY; } if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { e_dbg("MDI Read offset error - requested %d, returned %d\n", offset, (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); return -E1000_ERR_PHY; } *data = (u16)mdic; /* Allow some time after each MDIC transaction to avoid * reading duplicate data in the next MDIC transaction. */ if (hw->mac.type == e1000_pch2lan) udelay(100); return 0; } /** * e1000e_write_phy_reg_mdic - Write MDI control register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write to register at offset * * Writes data to MDI control register in the PHY at offset. **/ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) { struct e1000_phy_info *phy = &hw->phy; u32 i, mdic = 0; if (offset > MAX_PHY_REG_ADDRESS) { e_dbg("PHY Address %d is out of range\n", offset); return -E1000_ERR_PARAM; } /* Set up Op-code, Phy Address, and register offset in the MDI * Control register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. */ mdic = (((u32)data) | (offset << E1000_MDIC_REG_SHIFT) | (phy->addr << E1000_MDIC_PHY_SHIFT) | (E1000_MDIC_OP_WRITE)); ew32(MDIC, mdic); /* Poll the ready bit to see if the MDI read completed * Increasing the time out as testing showed failures with * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { udelay(50); mdic = er32(MDIC); if (mdic & E1000_MDIC_READY) break; } if (!(mdic & E1000_MDIC_READY)) { e_dbg("MDI Write PHY Reg Address %d did not complete\n", offset); return -E1000_ERR_PHY; } if (mdic & E1000_MDIC_ERROR) { e_dbg("MDI Write PHY Red Address %d Error\n", offset); return -E1000_ERR_PHY; } if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { e_dbg("MDI Write offset error - requested %d, returned %d\n", offset, (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); return -E1000_ERR_PHY; } /* Allow some time after each MDIC transaction to avoid * reading duplicate data in the next MDIC transaction. */ if (hw->mac.type == e1000_pch2lan) udelay(100); return 0; } /** * e1000e_read_phy_reg_m88 - Read m88 PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquires semaphore, if necessary, then reads the PHY register at offset * and storing the retrieved information in data. Release any acquired * semaphores before exiting. 
**/ s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); hw->phy.ops.release(hw); return ret_val; } /** * e1000e_write_phy_reg_m88 - Write m88 PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. **/ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); hw->phy.ops.release(hw); return ret_val; } /** * e1000_set_page_igp - Set page as on IGP-like PHY(s) * @hw: pointer to the HW structure * @page: page to set (shifted left when necessary) * * Sets PHY page required for PHY register access. Assumes semaphore is * already acquired. Note, this function sets phy.addr to 1 so the caller * must set it appropriately (if necessary) after this function returns. **/ s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) { e_dbg("Setting page 0x%x\n", page); hw->phy.addr = 1; return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); } /** * __e1000e_read_phy_reg_igp - Read igp PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary, then reads the PHY register at offset * and stores the retrieved information in data. Release any acquired * semaphores before exiting. **/ static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, bool locked) { s32 ret_val = 0; if (!locked) { if (!hw->phy.ops.acquire) return 0; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; } if (offset > MAX_PHY_MULTI_PAGE_REG) ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (u16)offset); if (!ret_val) ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); if (!locked) hw->phy.ops.release(hw); return ret_val; } /** * e1000e_read_phy_reg_igp - Read igp PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquires semaphore then reads the PHY register at offset and stores the * retrieved information in data. * Release the acquired semaphore before exiting. **/ s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000e_read_phy_reg_igp(hw, offset, data, false); } /** * e1000e_read_phy_reg_igp_locked - Read igp PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Reads the PHY register at offset and stores the retrieved information * in data. Assumes semaphore already acquired. **/ s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000e_read_phy_reg_igp(hw, offset, data, true); } /** * __e1000e_write_phy_reg_igp - Write igp PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. 
Release any acquired semaphores before exiting. **/ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, bool locked) { s32 ret_val = 0; if (!locked) { if (!hw->phy.ops.acquire) return 0; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; } if (offset > MAX_PHY_MULTI_PAGE_REG) ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (u16)offset); if (!ret_val) ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); if (!locked) hw->phy.ops.release(hw); return ret_val; } /** * e1000e_write_phy_reg_igp - Write igp PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. **/ s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000e_write_phy_reg_igp(hw, offset, data, false); } /** * e1000e_write_phy_reg_igp_locked - Write igp PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Writes the data to PHY register at the offset. * Assumes semaphore already acquired. **/ s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000e_write_phy_reg_igp(hw, offset, data, true); } /** * __e1000_read_kmrn_reg - Read kumeran register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary. Then reads the PHY register at offset * using the kumeran interface. The information retrieved is stored in data. * Release any acquired semaphores before exiting. **/ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, bool locked) { u32 kmrnctrlsta; if (!locked) { s32 ret_val = 0; if (!hw->phy.ops.acquire) return 0; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; } kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; ew32(KMRNCTRLSTA, kmrnctrlsta); e1e_flush(); udelay(2); kmrnctrlsta = er32(KMRNCTRLSTA); *data = (u16)kmrnctrlsta; if (!locked) hw->phy.ops.release(hw); return 0; } /** * e1000e_read_kmrn_reg - Read kumeran register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquires semaphore then reads the PHY register at offset using the * kumeran interface. The information retrieved is stored in data. * Release the acquired semaphore before exiting. **/ s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000_read_kmrn_reg(hw, offset, data, false); } /** * e1000e_read_kmrn_reg_locked - Read kumeran register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Reads the PHY register at offset using the kumeran interface. The * information retrieved is stored in data. * Assumes semaphore already acquired. **/ s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000_read_kmrn_reg(hw, offset, data, true); } /** * __e1000_write_kmrn_reg - Write kumeran register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary. 
Then write the data to PHY register * at the offset using the kumeran interface. Release any acquired semaphores * before exiting. **/ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, bool locked) { u32 kmrnctrlsta; if (!locked) { s32 ret_val = 0; if (!hw->phy.ops.acquire) return 0; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; } kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | data; ew32(KMRNCTRLSTA, kmrnctrlsta); e1e_flush(); udelay(2); if (!locked) hw->phy.ops.release(hw); return 0; } /** * e1000e_write_kmrn_reg - Write kumeran register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore then writes the data to the PHY register at the offset * using the kumeran interface. Release the acquired semaphore before exiting. **/ s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000_write_kmrn_reg(hw, offset, data, false); } /** * e1000e_write_kmrn_reg_locked - Write kumeran register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Write the data to PHY register at the offset using the kumeran interface. * Assumes semaphore already acquired. **/ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000_write_kmrn_reg(hw, offset, data, true); } /** * e1000_set_master_slave_mode - Setup PHY for Master/slave mode * @hw: pointer to the HW structure * * Sets up Master/slave mode **/ static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) { s32 ret_val; u16 phy_data; /* Resolve Master/Slave mode */ ret_val = e1e_rphy(hw, MII_CTRL1000, &phy_data); if (ret_val) return ret_val; /* load defaults for future use */ hw->phy.original_ms_type = (phy_data & CTL1000_ENABLE_MASTER) ? ((phy_data & CTL1000_AS_MASTER) ? e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto; switch (hw->phy.ms_type) { case e1000_ms_force_master: phy_data |= (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); break; case e1000_ms_force_slave: phy_data |= CTL1000_ENABLE_MASTER; phy_data &= ~(CTL1000_AS_MASTER); break; case e1000_ms_auto: phy_data &= ~CTL1000_ENABLE_MASTER; fallthrough; default: break; } return e1e_wphy(hw, MII_CTRL1000, phy_data); } /** * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link * @hw: pointer to the HW structure * * Sets up Carrier-sense on Transmit and downshift values. **/ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) { s32 ret_val; u16 phy_data; /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); if (ret_val) return ret_val; phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; /* Enable downshift */ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data); if (ret_val) return ret_val; /* Set MDI/MDIX mode */ ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data); if (ret_val) return ret_val; phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; /* Options: * 0 - Auto (default) * 1 - MDI mode * 2 - MDI-X mode */ switch (hw->phy.mdix) { case 1: break; case 2: phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; break; case 0: default: phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; break; } ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data); if (ret_val) return ret_val; return e1000_set_master_slave_mode(hw); } /** * e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link * @hw: pointer to the HW structure * * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock * and downshift values are set also. **/ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; /* Enable CRS on Tx. This must be set for half-duplex operation. */ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; /* For BM PHY this bit is downshift enable */ if (phy->type != e1000_phy_bm) phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; /* Options: * MDI/MDI-X = 0 (default) * 0 - Auto for all speeds * 1 - MDI mode * 2 - MDI-X mode * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) */ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; switch (phy->mdix) { case 1: phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; break; case 2: phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; break; case 3: phy_data |= M88E1000_PSCR_AUTO_X_1000T; break; case 0: default: phy_data |= M88E1000_PSCR_AUTO_X_MODE; break; } /* Options: * disable_polarity_correction = 0 (default) * Automatic Correction for Reversed Cable Polarity * 0 - Disabled * 1 - Enabled */ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; if (phy->disable_polarity_correction) phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; /* Enable downshift on BM (disabled by default) */ if (phy->type == e1000_phy_bm) { /* For 82574/82583, first disable then enable downshift */ if (phy->id == BME1000_E_PHY_ID_R2) { phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT; ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; /* Commit the changes. */ ret_val = phy->ops.commit(hw); if (ret_val) { e_dbg("Error committing the PHY changes\n"); return ret_val; } } phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; } ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; if ((phy->type == e1000_phy_m88) && (phy->revision < E1000_REVISION_4) && (phy->id != BME1000_E_PHY_ID_R2)) { /* Force TX_CLK in the Extended PHY Specific Control Register * to 25MHz clock. */ ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; phy_data |= M88E1000_EPSCR_TX_CLK_25; if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) { /* 82573L PHY - set the downshift counter to 5x. 
*/ phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; } else { /* Configure Master and Slave downshift values */ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); } ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; } if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) { /* Set PHY page 0, register 29 to 0x0003 */ ret_val = e1e_wphy(hw, 29, 0x0003); if (ret_val) return ret_val; /* Set PHY page 0, register 30 to 0x0000 */ ret_val = e1e_wphy(hw, 30, 0x0000); if (ret_val) return ret_val; } /* Commit the changes. */ if (phy->ops.commit) { ret_val = phy->ops.commit(hw); if (ret_val) { e_dbg("Error committing the PHY changes\n"); return ret_val; } } if (phy->type == e1000_phy_82578) { ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; /* 82578 PHY - set the downshift count to 1x. */ phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; } return 0; } /** * e1000e_copper_link_setup_igp - Setup igp PHY's for copper link * @hw: pointer to the HW structure * * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for * igp PHY's. **/ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; ret_val = e1000_phy_hw_reset(hw); if (ret_val) { e_dbg("Error resetting the PHY.\n"); return ret_val; } /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid * timeout issues when LFS is enabled. */ msleep(100); /* disable lplu d0 during driver init */ if (hw->phy.ops.set_d0_lplu_state) { ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); if (ret_val) { e_dbg("Error Disabling LPLU D0\n"); return ret_val; } } /* Configure mdi-mdix settings */ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCR_AUTO_MDIX; switch (phy->mdix) { case 1: data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; break; case 2: data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; break; case 0: default: data |= IGP01E1000_PSCR_AUTO_MDIX; break; } ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data); if (ret_val) return ret_val; /* set auto-master slave resolution settings */ if (hw->mac.autoneg) { /* when autonegotiation advertisement is only 1000Mbps then we * should disable SmartSpeed and enable Auto MasterSlave * resolution as hardware default. */ if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { /* Disable SmartSpeed */ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; /* Set auto Master/Slave resolution process */ ret_val = e1e_rphy(hw, MII_CTRL1000, &data); if (ret_val) return ret_val; data &= ~CTL1000_ENABLE_MASTER; ret_val = e1e_wphy(hw, MII_CTRL1000, data); if (ret_val) return ret_val; } ret_val = e1000_set_master_slave_mode(hw); } return ret_val; } /** * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation * @hw: pointer to the HW structure * * Reads the MII auto-neg advertisement register and/or the 1000T control * register and if the PHY is already setup for auto-negotiation, then * return successful. 
Otherwise, setup advertisement and flow control to * the appropriate values for the wanted auto-negotiation. **/ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 mii_autoneg_adv_reg; u16 mii_1000t_ctrl_reg = 0; phy->autoneg_advertised &= phy->autoneg_mask; /* Read the MII Auto-Neg Advertisement Register (Address 4). */ ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_autoneg_adv_reg); if (ret_val) return ret_val; if (phy->autoneg_mask & ADVERTISE_1000_FULL) { /* Read the MII 1000Base-T Control Register (Address 9). */ ret_val = e1e_rphy(hw, MII_CTRL1000, &mii_1000t_ctrl_reg); if (ret_val) return ret_val; } /* Need to parse both autoneg_advertised and fc and set up * the appropriate PHY registers. First we will parse for * autoneg_advertised software override. Since we can advertise * a plethora of combinations, we need to check each bit * individually. */ /* First we clear all the 10/100 mb speed bits in the Auto-Neg * Advertisement Register (Address 4) and the 1000 mb speed bits in * the 1000Base-T Control Register (Address 9). */ mii_autoneg_adv_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF); mii_1000t_ctrl_reg &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); /* Do we want to advertise 10 Mb Half Duplex? */ if (phy->autoneg_advertised & ADVERTISE_10_HALF) { e_dbg("Advertise 10mb Half duplex\n"); mii_autoneg_adv_reg |= ADVERTISE_10HALF; } /* Do we want to advertise 10 Mb Full Duplex? */ if (phy->autoneg_advertised & ADVERTISE_10_FULL) { e_dbg("Advertise 10mb Full duplex\n"); mii_autoneg_adv_reg |= ADVERTISE_10FULL; } /* Do we want to advertise 100 Mb Half Duplex? */ if (phy->autoneg_advertised & ADVERTISE_100_HALF) { e_dbg("Advertise 100mb Half duplex\n"); mii_autoneg_adv_reg |= ADVERTISE_100HALF; } /* Do we want to advertise 100 Mb Full Duplex? */ if (phy->autoneg_advertised & ADVERTISE_100_FULL) { e_dbg("Advertise 100mb Full duplex\n"); mii_autoneg_adv_reg |= ADVERTISE_100FULL; } /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ if (phy->autoneg_advertised & ADVERTISE_1000_HALF) e_dbg("Advertise 1000mb Half duplex request denied!\n"); /* Do we want to advertise 1000 Mb Full Duplex? */ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { e_dbg("Advertise 1000mb Full duplex\n"); mii_1000t_ctrl_reg |= ADVERTISE_1000FULL; } /* Check for a software override of the flow control settings, and * setup the PHY advertisement registers accordingly. If * auto-negotiation is enabled, then software will have to set the * "PAUSE" bits to the correct value in the Auto-Negotiation * Advertisement Register (MII_ADVERTISE) and re-start auto- * negotiation. * * The possible values of the "fc" parameter are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames * but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames * but we do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. * other: No software override. The flow control configuration * in the EEPROM is used. */ switch (hw->fc.current_mode) { case e1000_fc_none: /* Flow control (Rx & Tx) is completely disabled by a * software over-ride. 
*/ mii_autoneg_adv_reg &= ~(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); phy->autoneg_advertised &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); break; case e1000_fc_rx_pause: /* Rx Flow control is enabled, and Tx Flow control is * disabled, by a software over-ride. * * Since there really isn't a way to advertise that we are * capable of Rx Pause ONLY, we will advertise that we * support both symmetric and asymmetric Rx PAUSE. Later * (in e1000e_config_fc_after_link_up) we will disable the * hw's ability to send PAUSE frames. */ mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); phy->autoneg_advertised |= (ADVERTISED_Pause | ADVERTISED_Asym_Pause); break; case e1000_fc_tx_pause: /* Tx Flow control is enabled, and Rx Flow control is * disabled, by a software over-ride. */ mii_autoneg_adv_reg |= ADVERTISE_PAUSE_ASYM; mii_autoneg_adv_reg &= ~ADVERTISE_PAUSE_CAP; phy->autoneg_advertised |= ADVERTISED_Asym_Pause; phy->autoneg_advertised &= ~ADVERTISED_Pause; break; case e1000_fc_full: /* Flow control (both Rx and Tx) is enabled by a software * over-ride. */ mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); phy->autoneg_advertised |= (ADVERTISED_Pause | ADVERTISED_Asym_Pause); break; default: e_dbg("Flow control param set incorrectly\n"); return -E1000_ERR_CONFIG; } ret_val = e1e_wphy(hw, MII_ADVERTISE, mii_autoneg_adv_reg); if (ret_val) return ret_val; e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); if (phy->autoneg_mask & ADVERTISE_1000_FULL) ret_val = e1e_wphy(hw, MII_CTRL1000, mii_1000t_ctrl_reg); return ret_val; } /** * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link * @hw: pointer to the HW structure * * Performs initial bounds checking on autoneg advertisement parameter, then * configure to advertise the full capability. Setup the PHY to autoneg * and restart the negotiation process between the link partner. If * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. **/ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_ctrl; /* Perform some bounds checking on the autoneg advertisement * parameter. */ phy->autoneg_advertised &= phy->autoneg_mask; /* If autoneg_advertised is zero, we assume it was not defaulted * by the calling code so we set to advertise full capability. */ if (!phy->autoneg_advertised) phy->autoneg_advertised = phy->autoneg_mask; e_dbg("Reconfiguring auto-neg advertisement params\n"); ret_val = e1000_phy_setup_autoneg(hw); if (ret_val) { e_dbg("Error Setting up Auto-Negotiation\n"); return ret_val; } e_dbg("Restarting Auto-Neg\n"); /* Restart auto-negotiation by setting the Auto Neg Enable bit and * the Auto Neg Restart bit in the PHY control register. */ ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl); if (ret_val) return ret_val; phy_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART); ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl); if (ret_val) return ret_val; /* Does the user want to wait for Auto-Neg to complete here, or * check at a later time (for example, callback routine). */ if (phy->autoneg_wait_to_complete) { ret_val = e1000_wait_autoneg(hw); if (ret_val) { e_dbg("Error while waiting for autoneg to complete\n"); return ret_val; } } hw->mac.get_link_status = true; return ret_val; } /** * e1000e_setup_copper_link - Configure copper link settings * @hw: pointer to the HW structure * * Calls the appropriate function to configure the link for auto-neg or forced * speed and duplex. 
Then we check for link, once link is established calls * to configure collision distance and flow control are called. If link is * not established, we return -E1000_ERR_PHY (-2). **/ s32 e1000e_setup_copper_link(struct e1000_hw *hw) { s32 ret_val; bool link; if (hw->mac.autoneg) { /* Setup autoneg and flow control advertisement and perform * autonegotiation. */ ret_val = e1000_copper_link_autoneg(hw); if (ret_val) return ret_val; } else { /* PHY will be set to 10H, 10F, 100H or 100F * depending on user settings. */ e_dbg("Forcing Speed and Duplex\n"); ret_val = hw->phy.ops.force_speed_duplex(hw); if (ret_val) { e_dbg("Error Forcing Speed and Duplex\n"); return ret_val; } } /* Check link status. Wait up to 100 microseconds for link to become * valid. */ ret_val = e1000e_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, &link); if (ret_val) return ret_val; if (link) { e_dbg("Valid link established!!!\n"); hw->mac.ops.config_collision_dist(hw); ret_val = e1000e_config_fc_after_link_up(hw); } else { e_dbg("Unable to establish link!!!\n"); } return ret_val; } /** * e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY * @hw: pointer to the HW structure * * Calls the PHY setup function to force speed and duplex. Clears the * auto-crossover to force MDI manually. Waits for link and returns * successful if link up is successful, else -E1000_ERR_PHY (-2). **/ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; bool link; ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); if (ret_val) return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &phy_data); ret_val = e1e_wphy(hw, MII_BMCR, phy_data); if (ret_val) return ret_val; /* Clear Auto-Crossover to force MDI manually. IGP requires MDI * forced whenever speed and duplex are forced. */ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); if (ret_val) return ret_val; phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); if (ret_val) return ret_val; e_dbg("IGP PSCR: %X\n", phy_data); udelay(1); if (phy->autoneg_wait_to_complete) { e_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; if (!link) e_dbg("Link taking longer than expected.\n"); /* Try once more */ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); } return ret_val; } /** * e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY * @hw: pointer to the HW structure * * Calls the PHY setup function to force speed and duplex. Clears the * auto-crossover to force MDI manually. Resets the PHY to commit the * changes. If time expires while waiting for link up, we reset the DSP. * After reset, TX_CLK and CRS on Tx must be set. Return successful upon * successful completion, else return corresponding error code. **/ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; bool link; /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI * forced whenever speed and duplex are forced. 
*/ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; e_dbg("M88E1000 PSCR: %X\n", phy_data); ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); if (ret_val) return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &phy_data); ret_val = e1e_wphy(hw, MII_BMCR, phy_data); if (ret_val) return ret_val; /* Reset the phy to commit changes. */ if (hw->phy.ops.commit) { ret_val = hw->phy.ops.commit(hw); if (ret_val) return ret_val; } if (phy->autoneg_wait_to_complete) { e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; if (!link) { if (hw->phy.type != e1000_phy_m88) { e_dbg("Link taking longer than expected.\n"); } else { /* We didn't get link. * Reset the DSP and cross our fingers. */ ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, 0x001d); if (ret_val) return ret_val; ret_val = e1000e_phy_reset_dsp(hw); if (ret_val) return ret_val; } } /* Try once more */ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; } if (hw->phy.type != e1000_phy_m88) return 0; ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; /* Resetting the phy means we need to re-force TX_CLK in the * Extended PHY Specific Control Register to 25MHz clock from * the reset value of 2.5MHz. */ phy_data |= M88E1000_EPSCR_TX_CLK_25; ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; /* In addition, we must re-enable CRS on Tx for both half and full * duplex. */ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); return ret_val; } /** * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex * @hw: pointer to the HW structure * * Forces the speed and duplex settings of the PHY. * This is a function pointer entry point only called by * PHY setup routines. 
**/ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; bool link; ret_val = e1e_rphy(hw, MII_BMCR, &data); if (ret_val) return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &data); ret_val = e1e_wphy(hw, MII_BMCR, data); if (ret_val) return ret_val; /* Disable MDI-X support for 10/100 */ ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); if (ret_val) return ret_val; data &= ~IFE_PMC_AUTO_MDIX; data &= ~IFE_PMC_FORCE_MDIX; ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); if (ret_val) return ret_val; e_dbg("IFE PMC: %X\n", data); udelay(1); if (phy->autoneg_wait_to_complete) { e_dbg("Waiting for forced speed/duplex link on IFE phy.\n"); ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; if (!link) e_dbg("Link taking longer than expected.\n"); /* Try once more */ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; } return 0; } /** * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex * @hw: pointer to the HW structure * @phy_ctrl: pointer to current value of MII_BMCR * * Forces speed and duplex on the PHY by doing the following: disable flow * control, force speed/duplex on the MAC, disable auto speed detection, * disable auto-negotiation, configure duplex, configure speed, configure * the collision distance, write configuration to CTRL register. The * caller must write to the MII_BMCR register for these settings to * take affect. **/ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) { struct e1000_mac_info *mac = &hw->mac; u32 ctrl; /* Turn off flow control when forcing speed/duplex */ hw->fc.current_mode = e1000_fc_none; /* Force speed/duplex on the mac */ ctrl = er32(CTRL); ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); ctrl &= ~E1000_CTRL_SPD_SEL; /* Disable Auto Speed Detection */ ctrl &= ~E1000_CTRL_ASDE; /* Disable autoneg on the phy */ *phy_ctrl &= ~BMCR_ANENABLE; /* Forcing Full or Half Duplex? */ if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { ctrl &= ~E1000_CTRL_FD; *phy_ctrl &= ~BMCR_FULLDPLX; e_dbg("Half Duplex\n"); } else { ctrl |= E1000_CTRL_FD; *phy_ctrl |= BMCR_FULLDPLX; e_dbg("Full Duplex\n"); } /* Forcing 10mb or 100mb? */ if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { ctrl |= E1000_CTRL_SPD_100; *phy_ctrl |= BMCR_SPEED100; *phy_ctrl &= ~BMCR_SPEED1000; e_dbg("Forcing 100mb\n"); } else { ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); *phy_ctrl &= ~(BMCR_SPEED1000 | BMCR_SPEED100); e_dbg("Forcing 10mb\n"); } hw->mac.ops.config_collision_dist(hw); ew32(CTRL, ctrl); } /** * e1000e_set_d3_lplu_state - Sets low power link up state for D3 * @hw: pointer to the HW structure * @active: boolean used to enable/disable lplu * * Success returns 0, Failure returns 1 * * The low power link up (lplu) state is set to the power management level D3 * and SmartSpeed is disabled when active is true, else clear lplu for D3 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU * is used during Dx states where the power conservation is most important. * During driver activity, SmartSpeed should be enabled so performance is * maintained. 
**/ s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); if (ret_val) return ret_val; if (!active) { data &= ~IGP02E1000_PM_D3_LPLU; ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); if (ret_val) return ret_val; /* LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. */ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { data |= IGP02E1000_PM_D3_LPLU; ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); if (ret_val) return ret_val; /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); } return ret_val; } /** * e1000e_check_downshift - Checks whether a downshift in speed occurred * @hw: pointer to the HW structure * * Success returns 0, Failure returns 1 * * A downshift is detected by querying the PHY link health. **/ s32 e1000e_check_downshift(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, offset, mask; switch (phy->type) { case e1000_phy_m88: case e1000_phy_gg82563: case e1000_phy_bm: case e1000_phy_82578: offset = M88E1000_PHY_SPEC_STATUS; mask = M88E1000_PSSR_DOWNSHIFT; break; case e1000_phy_igp_2: case e1000_phy_igp_3: offset = IGP01E1000_PHY_LINK_HEALTH; mask = IGP01E1000_PLHR_SS_DOWNGRADE; break; default: /* speed downshift not supported */ phy->speed_downgraded = false; return 0; } ret_val = e1e_rphy(hw, offset, &phy_data); if (!ret_val) phy->speed_downgraded = !!(phy_data & mask); return ret_val; } /** * e1000_check_polarity_m88 - Checks the polarity. * @hw: pointer to the HW structure * * Success returns 0, Failure returns -E1000_ERR_PHY (-2) * * Polarity is determined based on the PHY specific status register. **/ s32 e1000_check_polarity_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data); if (!ret_val) phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY) ? e1000_rev_polarity_reversed : e1000_rev_polarity_normal); return ret_val; } /** * e1000_check_polarity_igp - Checks the polarity. * @hw: pointer to the HW structure * * Success returns 0, Failure returns -E1000_ERR_PHY (-2) * * Polarity is determined based on the PHY port status register, and the * current speed (since there is no polarity at 100Mbps). **/ s32 e1000_check_polarity_igp(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data, offset, mask; /* Polarity is determined based on the speed of * our connection. 
*/ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); if (ret_val) return ret_val; if ((data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { offset = IGP01E1000_PHY_PCS_INIT_REG; mask = IGP01E1000_PHY_POLARITY_MASK; } else { /* This really only applies to 10Mbps since * there is no polarity for 100Mbps (always 0). */ offset = IGP01E1000_PHY_PORT_STATUS; mask = IGP01E1000_PSSR_POLARITY_REVERSED; } ret_val = e1e_rphy(hw, offset, &data); if (!ret_val) phy->cable_polarity = ((data & mask) ? e1000_rev_polarity_reversed : e1000_rev_polarity_normal); return ret_val; } /** * e1000_check_polarity_ife - Check cable polarity for IFE PHY * @hw: pointer to the HW structure * * Polarity is determined on the polarity reversal feature being enabled. **/ s32 e1000_check_polarity_ife(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, offset, mask; /* Polarity is determined based on the reversal feature being enabled. */ if (phy->polarity_correction) { offset = IFE_PHY_EXTENDED_STATUS_CONTROL; mask = IFE_PESC_POLARITY_REVERSED; } else { offset = IFE_PHY_SPECIAL_CONTROL; mask = IFE_PSC_FORCE_POLARITY; } ret_val = e1e_rphy(hw, offset, &phy_data); if (!ret_val) phy->cable_polarity = ((phy_data & mask) ? e1000_rev_polarity_reversed : e1000_rev_polarity_normal); return ret_val; } /** * e1000_wait_autoneg - Wait for auto-neg completion * @hw: pointer to the HW structure * * Waits for auto-negotiation to complete or for the auto-negotiation time * limit to expire, which ever happens first. **/ static s32 e1000_wait_autoneg(struct e1000_hw *hw) { s32 ret_val = 0; u16 i, phy_status; /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); if (ret_val) break; ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); if (ret_val) break; if (phy_status & BMSR_ANEGCOMPLETE) break; msleep(100); } /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation * has completed. */ return ret_val; } /** * e1000e_phy_has_link_generic - Polls PHY for link * @hw: pointer to the HW structure * @iterations: number of times to poll for link * @usec_interval: delay between polling attempts * @success: pointer to whether polling was successful or not * * Polls the PHY status register for link, 'iterations' number of times. **/ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, u32 usec_interval, bool *success) { s32 ret_val = 0; u16 i, phy_status; *success = false; for (i = 0; i < iterations; i++) { /* Some PHYs require the MII_BMSR register to be read * twice due to the link bit being sticky. No harm doing * it across the board. */ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); if (ret_val) { /* If the first read fails, another entity may have * ownership of the resources, wait and try again to * see if they have relinquished the resources yet. */ if (usec_interval >= 1000) msleep(usec_interval / 1000); else udelay(usec_interval); } ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); if (ret_val) break; if (phy_status & BMSR_LSTATUS) { *success = true; break; } if (usec_interval >= 1000) msleep(usec_interval / 1000); else udelay(usec_interval); } return ret_val; } /** * e1000e_get_cable_length_m88 - Determine cable length for m88 PHY * @hw: pointer to the HW structure * * Reads the PHY specific status register to retrieve the cable length * information. The cable length is determined by averaging the minimum and * maximum values to get the "average" cable length. 
The m88 PHY has four * possible cable length values, which are: * Register Value Cable Length * 0 < 50 meters * 1 50 - 80 meters * 2 80 - 110 meters * 3 110 - 140 meters * 4 > 140 meters **/ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, index; ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if (ret_val) return ret_val; index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT); if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) return -E1000_ERR_PHY; phy->min_cable_length = e1000_m88_cable_length_table[index]; phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; return 0; } /** * e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY * @hw: pointer to the HW structure * * The automatic gain control (agc) normalizes the amplitude of the * received signal, adjusting for the attenuation produced by the * cable. By reading the AGC registers, which represent the * combination of coarse and fine gain value, the value can be put * into a lookup table to obtain the approximate cable length * for each channel. **/ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, i, agc_value = 0; u16 cur_agc_index, max_agc_index = 0; u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { IGP02E1000_PHY_AGC_A, IGP02E1000_PHY_AGC_B, IGP02E1000_PHY_AGC_C, IGP02E1000_PHY_AGC_D }; /* Read the AGC registers for all channels */ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data); if (ret_val) return ret_val; /* Getting bits 15:9, which represent the combination of * coarse and fine gain values. The result is a number * that can be put into the lookup table to obtain the * approximate cable length. */ cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & IGP02E1000_AGC_LENGTH_MASK); /* Array index bound check. */ if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || (cur_agc_index == 0)) return -E1000_ERR_PHY; /* Remove min & max AGC values from calculation. */ if (e1000_igp_2_cable_length_table[min_agc_index] > e1000_igp_2_cable_length_table[cur_agc_index]) min_agc_index = cur_agc_index; if (e1000_igp_2_cable_length_table[max_agc_index] < e1000_igp_2_cable_length_table[cur_agc_index]) max_agc_index = cur_agc_index; agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; } agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + e1000_igp_2_cable_length_table[max_agc_index]); agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); /* Calculate cable length with the error range of +/- 10 meters. */ phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? (agc_value - IGP02E1000_AGC_RANGE) : 0); phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; return 0; } /** * e1000e_get_phy_info_m88 - Retrieve PHY information * @hw: pointer to the HW structure * * Valid for only copper links. Read the PHY status register (sticky read) * to verify that link is up. Read the PHY special control register to * determine the polarity and 10base-T extended distance. Read the PHY * special status register to determine MDI/MDIx and current speed. If * speed is 1000, then determine cable length, local and remote receiver. 
**/ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; bool link; if (phy->media_type != e1000_media_type_copper) { e_dbg("Phy info is only valid for copper media\n"); return -E1000_ERR_CONFIG; } ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) return ret_val; if (!link) { e_dbg("Phy info is only valid if link is up\n"); return -E1000_ERR_CONFIG; } ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; phy->polarity_correction = !!(phy_data & M88E1000_PSCR_POLARITY_REVERSAL); ret_val = e1000_check_polarity_m88(hw); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if (ret_val) return ret_val; phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { ret_val = hw->phy.ops.get_cable_length(hw); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, MII_STAT1000, &phy_data); if (ret_val) return ret_val; phy->local_rx = (phy_data & LPA_1000LOCALRXOK) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; phy->remote_rx = (phy_data & LPA_1000REMRXOK) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; } else { /* Set values to "undefined" */ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; phy->local_rx = e1000_1000t_rx_status_undefined; phy->remote_rx = e1000_1000t_rx_status_undefined; } return ret_val; } /** * e1000e_get_phy_info_igp - Retrieve igp PHY information * @hw: pointer to the HW structure * * Read PHY status to determine if link is up. If link is up, then * set/determine 10base-T extended distance and polarity correction. Read * PHY port status to determine MDI/MDIx and speed. Based on the speed, * determine on the cable length, local and remote receiver. **/ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; bool link; ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) return ret_val; if (!link) { e_dbg("Phy info is only valid if link is up\n"); return -E1000_ERR_CONFIG; } phy->polarity_correction = true; ret_val = e1000_check_polarity_igp(hw); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); if (ret_val) return ret_val; phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); if ((data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { ret_val = phy->ops.get_cable_length(hw); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, MII_STAT1000, &data); if (ret_val) return ret_val; phy->local_rx = (data & LPA_1000LOCALRXOK) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; phy->remote_rx = (data & LPA_1000REMRXOK) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; } else { phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; phy->local_rx = e1000_1000t_rx_status_undefined; phy->remote_rx = e1000_1000t_rx_status_undefined; } return ret_val; } /** * e1000_get_phy_info_ife - Retrieves various IFE PHY states * @hw: pointer to the HW structure * * Populates "phy" structure with various feature states. 
**/ s32 e1000_get_phy_info_ife(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; bool link; ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) return ret_val; if (!link) { e_dbg("Phy info is only valid if link is up\n"); return -E1000_ERR_CONFIG; } ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); if (ret_val) return ret_val; phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE); if (phy->polarity_correction) { ret_val = e1000_check_polarity_ife(hw); if (ret_val) return ret_val; } else { /* Polarity is forced */ phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) ? e1000_rev_polarity_reversed : e1000_rev_polarity_normal); } ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); if (ret_val) return ret_val; phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS); /* The following parameters are undefined for 10/100 operation. */ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; phy->local_rx = e1000_1000t_rx_status_undefined; phy->remote_rx = e1000_1000t_rx_status_undefined; return 0; } /** * e1000e_phy_sw_reset - PHY software reset * @hw: pointer to the HW structure * * Does a software reset of the PHY by reading the PHY control register and * setting/write the control register reset bit to the PHY. **/ s32 e1000e_phy_sw_reset(struct e1000_hw *hw) { s32 ret_val; u16 phy_ctrl; ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl); if (ret_val) return ret_val; phy_ctrl |= BMCR_RESET; ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl); if (ret_val) return ret_val; udelay(1); return ret_val; } /** * e1000e_phy_hw_reset_generic - PHY hardware reset * @hw: pointer to the HW structure * * Verify the reset block is not blocking us from resetting. Acquire * semaphore (if necessary) and read/set/write the device control reset * bit in the PHY. Wait the appropriate delay time for the device to * reset and release the semaphore (if necessary). **/ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u32 ctrl; if (phy->ops.check_reset_block) { ret_val = phy->ops.check_reset_block(hw); if (ret_val) return 0; } ret_val = phy->ops.acquire(hw); if (ret_val) return ret_val; ctrl = er32(CTRL); ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); e1e_flush(); udelay(phy->reset_delay_us); ew32(CTRL, ctrl); e1e_flush(); usleep_range(150, 300); phy->ops.release(hw); return phy->ops.get_cfg_done(hw); } /** * e1000e_get_cfg_done_generic - Generic configuration done * @hw: pointer to the HW structure * * Generic function to wait 10 milli-seconds for configuration to complete * and return success. **/ s32 e1000e_get_cfg_done_generic(struct e1000_hw __always_unused *hw) { mdelay(10); return 0; } /** * e1000e_phy_init_script_igp3 - Inits the IGP3 PHY * @hw: pointer to the HW structure * * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. 
**/ s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) { e_dbg("Running IGP 3 PHY init script\n"); /* PHY init IGP 3 */ /* Enable rise/fall, 10-mode work in class-A */ e1e_wphy(hw, 0x2F5B, 0x9018); /* Remove all caps from Replica path filter */ e1e_wphy(hw, 0x2F52, 0x0000); /* Bias trimming for ADC, AFE and Driver (Default) */ e1e_wphy(hw, 0x2FB1, 0x8B24); /* Increase Hybrid poly bias */ e1e_wphy(hw, 0x2FB2, 0xF8F0); /* Add 4% to Tx amplitude in Gig mode */ e1e_wphy(hw, 0x2010, 0x10B0); /* Disable trimming (TTT) */ e1e_wphy(hw, 0x2011, 0x0000); /* Poly DC correction to 94.6% + 2% for all channels */ e1e_wphy(hw, 0x20DD, 0x249A); /* ABS DC correction to 95.9% */ e1e_wphy(hw, 0x20DE, 0x00D3); /* BG temp curve trim */ e1e_wphy(hw, 0x28B4, 0x04CE); /* Increasing ADC OPAMP stage 1 currents to max */ e1e_wphy(hw, 0x2F70, 0x29E4); /* Force 1000 ( required for enabling PHY regs configuration) */ e1e_wphy(hw, 0x0000, 0x0140); /* Set upd_freq to 6 */ e1e_wphy(hw, 0x1F30, 0x1606); /* Disable NPDFE */ e1e_wphy(hw, 0x1F31, 0xB814); /* Disable adaptive fixed FFE (Default) */ e1e_wphy(hw, 0x1F35, 0x002A); /* Enable FFE hysteresis */ e1e_wphy(hw, 0x1F3E, 0x0067); /* Fixed FFE for short cable lengths */ e1e_wphy(hw, 0x1F54, 0x0065); /* Fixed FFE for medium cable lengths */ e1e_wphy(hw, 0x1F55, 0x002A); /* Fixed FFE for long cable lengths */ e1e_wphy(hw, 0x1F56, 0x002A); /* Enable Adaptive Clip Threshold */ e1e_wphy(hw, 0x1F72, 0x3FB0); /* AHT reset limit to 1 */ e1e_wphy(hw, 0x1F76, 0xC0FF); /* Set AHT master delay to 127 msec */ e1e_wphy(hw, 0x1F77, 0x1DEC); /* Set scan bits for AHT */ e1e_wphy(hw, 0x1F78, 0xF9EF); /* Set AHT Preset bits */ e1e_wphy(hw, 0x1F79, 0x0210); /* Change integ_factor of channel A to 3 */ e1e_wphy(hw, 0x1895, 0x0003); /* Change prop_factor of channels BCD to 8 */ e1e_wphy(hw, 0x1796, 0x0008); /* Change cg_icount + enable integbp for channels BCD */ e1e_wphy(hw, 0x1798, 0xD008); /* Change cg_icount + enable integbp + change prop_factor_master * to 8 for channel A */ e1e_wphy(hw, 0x1898, 0xD918); /* Disable AHT in Slave mode on channel A */ e1e_wphy(hw, 0x187A, 0x0800); /* Enable LPLU and disable AN to 1000 in non-D0a states, * Enable SPD+B2B */ e1e_wphy(hw, 0x0019, 0x008D); /* Enable restart AN on an1000_dis change */ e1e_wphy(hw, 0x001B, 0x2080); /* Enable wh_fifo read clock in 10/100 modes */ e1e_wphy(hw, 0x0014, 0x0045); /* Restart AN, Speed selection is 1000 */ e1e_wphy(hw, 0x0000, 0x1340); return 0; } /** * e1000e_get_phy_type_from_id - Get PHY type from id * @phy_id: phy_id read from the phy * * Returns the phy type from the id. 
**/ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) { enum e1000_phy_type phy_type = e1000_phy_unknown; switch (phy_id) { case M88E1000_I_PHY_ID: case M88E1000_E_PHY_ID: case M88E1111_I_PHY_ID: case M88E1011_I_PHY_ID: phy_type = e1000_phy_m88; break; case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ phy_type = e1000_phy_igp_2; break; case GG82563_E_PHY_ID: phy_type = e1000_phy_gg82563; break; case IGP03E1000_E_PHY_ID: phy_type = e1000_phy_igp_3; break; case IFE_E_PHY_ID: case IFE_PLUS_E_PHY_ID: case IFE_C_E_PHY_ID: phy_type = e1000_phy_ife; break; case BME1000_E_PHY_ID: case BME1000_E_PHY_ID_R2: phy_type = e1000_phy_bm; break; case I82578_E_PHY_ID: phy_type = e1000_phy_82578; break; case I82577_E_PHY_ID: phy_type = e1000_phy_82577; break; case I82579_E_PHY_ID: phy_type = e1000_phy_82579; break; case I217_E_PHY_ID: phy_type = e1000_phy_i217; break; default: phy_type = e1000_phy_unknown; break; } return phy_type; } /** * e1000e_determine_phy_address - Determines PHY address. * @hw: pointer to the HW structure * * This uses a trial and error method to loop through possible PHY * addresses. It tests each by reading the PHY ID registers and * checking for a match. **/ s32 e1000e_determine_phy_address(struct e1000_hw *hw) { u32 phy_addr = 0; u32 i; enum e1000_phy_type phy_type = e1000_phy_unknown; hw->phy.id = phy_type; for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { hw->phy.addr = phy_addr; i = 0; do { e1000e_get_phy_id(hw); phy_type = e1000e_get_phy_type_from_id(hw->phy.id); /* If phy_type is valid, break - we found our * PHY address */ if (phy_type != e1000_phy_unknown) return 0; usleep_range(1000, 2000); i++; } while (i < 10); } return -E1000_ERR_PHY_TYPE; } /** * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address * @page: page to access * @reg: register to check * * Returns the phy address for the page requested. **/ static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) { u32 phy_addr = 2; if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) phy_addr = 1; return phy_addr; } /** * e1000e_write_phy_reg_bm - Write BM PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. **/ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val; u32 page = offset >> IGP_PAGE_SHIFT; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, false, false); goto release; } hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); if (offset > MAX_PHY_MULTI_PAGE_REG) { u32 page_shift, page_select; /* Page select is register 31 for phy address 1 and 22 for * phy address 2 and 3. Page select is shifted only for * phy address 1. 
*/ if (hw->phy.addr == 1) { page_shift = IGP_PAGE_SHIFT; page_select = IGP01E1000_PHY_PAGE_SELECT; } else { page_shift = 0; page_select = BM_PHY_PAGE_SELECT; } /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, (page << page_shift)); if (ret_val) goto release; } ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000e_read_phy_reg_bm - Read BM PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquires semaphore, if necessary, then reads the PHY register at offset * and storing the retrieved information in data. Release any acquired * semaphores before exiting. **/ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; u32 page = offset >> IGP_PAGE_SHIFT; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, true, false); goto release; } hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); if (offset > MAX_PHY_MULTI_PAGE_REG) { u32 page_shift, page_select; /* Page select is register 31 for phy address 1 and 22 for * phy address 2 and 3. Page select is shifted only for * phy address 1. */ if (hw->phy.addr == 1) { page_shift = IGP_PAGE_SHIFT; page_select = IGP01E1000_PHY_PAGE_SELECT; } else { page_shift = 0; page_select = BM_PHY_PAGE_SELECT; } /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, (page << page_shift)); if (ret_val) goto release; } ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000e_read_phy_reg_bm2 - Read BM PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquires semaphore, if necessary, then reads the PHY register at offset * and storing the retrieved information in data. Release any acquired * semaphores before exiting. **/ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; u16 page = (u16)(offset >> IGP_PAGE_SHIFT); ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, true, false); goto release; } hw->phy.addr = 1; if (offset > MAX_PHY_MULTI_PAGE_REG) { /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, page); if (ret_val) goto release; } ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000e_write_phy_reg_bm2 - Write BM PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. 
**/ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val; u16 page = (u16)(offset >> IGP_PAGE_SHIFT); ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, false, false); goto release; } hw->phy.addr = 1; if (offset > MAX_PHY_MULTI_PAGE_REG) { /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, page); if (ret_val) goto release; } ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers * @hw: pointer to the HW structure * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG * * Assumes semaphore already acquired and phy_reg points to a valid memory * address to store contents of the BM_WUC_ENABLE_REG register. **/ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) { s32 ret_val; u16 temp; /* All page select, port ctrl and wakeup registers use phy address 1 */ hw->phy.addr = 1; /* Select Port Control Registers page */ ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); if (ret_val) { e_dbg("Could not set Port Control page\n"); return ret_val; } ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); if (ret_val) { e_dbg("Could not read PHY register %d.%d\n", BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); return ret_val; } /* Enable both PHY wakeup mode and Wakeup register page writes. * Prevent a power state change by disabling ME and Host PHY wakeup. */ temp = *phy_reg; temp |= BM_WUC_ENABLE_BIT; temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT); ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp); if (ret_val) { e_dbg("Could not write PHY register %d.%d\n", BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); return ret_val; } /* Select Host Wakeup Registers page - caller now able to write * registers on the Wakeup registers page */ return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); } /** * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs * @hw: pointer to the HW structure * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG * * Restore BM_WUC_ENABLE_REG to its original value. * * Assumes semaphore already acquired and *phy_reg is the contents of the * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by * caller. **/ s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) { s32 ret_val; /* Select Port Control Registers page */ ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); if (ret_val) { e_dbg("Could not set Port Control page\n"); return ret_val; } /* Restore 769.17 to its original value */ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg); if (ret_val) e_dbg("Could not restore PHY register %d.%d\n", BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); return ret_val; } /** * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register * @hw: pointer to the HW structure * @offset: register offset to be read or written * @data: pointer to the data to read or write * @read: determines if operation is read or write * @page_set: BM_WUC_PAGE already set and access enabled * * Read the PHY register at offset and store the retrieved information in * data, or write data to PHY register at offset. 
Note the procedure to * access the PHY wakeup registers is different than reading the other PHY * registers. It works as such: * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1 * 2) Set page to 800 for host (801 if we were manageability) * 3) Write the address using the address opcode (0x11) * 4) Read or write the data using the data opcode (0x12) * 5) Restore 769.17.2 to its original value * * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm(). * * Assumes semaphore is already acquired. When page_set==true, assumes * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack * is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_bm()). **/ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data, bool read, bool page_set) { s32 ret_val; u16 reg = BM_PHY_REG_NUM(offset); u16 page = BM_PHY_REG_PAGE(offset); u16 phy_reg = 0; /* Gig must be disabled for MDIO accesses to Host Wakeup reg page */ if ((hw->mac.type == e1000_pchlan) && (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) e_dbg("Attempting to access page %d while gig enabled.\n", page); if (!page_set) { /* Enable access to PHY wakeup registers */ ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); if (ret_val) { e_dbg("Could not enable PHY wakeup reg access\n"); return ret_val; } } e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg); /* Write the Wakeup register page offset value using opcode 0x11 */ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); if (ret_val) { e_dbg("Could not write address opcode to page %d\n", page); return ret_val; } if (read) { /* Read the Wakeup register page value using opcode 0x12 */ ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, data); } else { /* Write the Wakeup register page value using opcode 0x12 */ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, *data); } if (ret_val) { e_dbg("Could not access PHY reg %d.%d\n", page, reg); return ret_val; } if (!page_set) ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); return ret_val; } /** * e1000_power_up_phy_copper - Restore copper link in case of PHY power down * @hw: pointer to the HW structure * * In the case of a PHY power down to save power, or to turn off link during a * driver unload, or wake on lan is not enabled, restore the link to previous * settings. **/ void e1000_power_up_phy_copper(struct e1000_hw *hw) { u16 mii_reg = 0; int ret; /* The PHY will retain its settings across a power down/up cycle */ ret = e1e_rphy(hw, MII_BMCR, &mii_reg); if (ret) { e_dbg("Error reading PHY register\n"); return; } mii_reg &= ~BMCR_PDOWN; e1e_wphy(hw, MII_BMCR, mii_reg); } /** * e1000_power_down_phy_copper - Restore copper link in case of PHY power down * @hw: pointer to the HW structure * * In the case of a PHY power down to save power, or to turn off link during a * driver unload, or wake on lan is not enabled, restore the link to previous * settings. 
**/ void e1000_power_down_phy_copper(struct e1000_hw *hw) { u16 mii_reg = 0; int ret; /* The PHY will retain its settings across a power down/up cycle */ ret = e1e_rphy(hw, MII_BMCR, &mii_reg); if (ret) { e_dbg("Error reading PHY register\n"); return; } mii_reg |= BMCR_PDOWN; e1e_wphy(hw, MII_BMCR, mii_reg); usleep_range(1000, 2000); } /** * __e1000_read_phy_reg_hv - Read HV PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * @locked: semaphore has already been acquired or not * @page_set: BM_WUC_PAGE already set and access enabled * * Acquires semaphore, if necessary, then reads the PHY register at offset * and stores the retrieved information in data. Release any acquired * semaphore before exiting. **/ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, bool locked, bool page_set) { s32 ret_val; u16 page = BM_PHY_REG_PAGE(offset); u16 reg = BM_PHY_REG_NUM(offset); u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); if (!locked) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; } /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, true, page_set); goto out; } if (page > 0 && page < HV_INTC_FC_PAGE_START) { ret_val = e1000_access_phy_debug_regs_hv(hw, offset, data, true); goto out; } if (!page_set) { if (page == HV_INTC_FC_PAGE_START) page = 0; if (reg > MAX_PHY_MULTI_PAGE_REG) { /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000_set_page_igp(hw, (page << IGP_PAGE_SHIFT)); hw->phy.addr = phy_addr; if (ret_val) goto out; } } e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page, page << IGP_PAGE_SHIFT, reg); ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data); out: if (!locked) hw->phy.ops.release(hw); return ret_val; } /** * e1000_read_phy_reg_hv - Read HV PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquires semaphore then reads the PHY register at offset and stores * the retrieved information in data. Release the acquired semaphore * before exiting. **/ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000_read_phy_reg_hv(hw, offset, data, false, false); } /** * e1000_read_phy_reg_hv_locked - Read HV PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Reads the PHY register at offset and stores the retrieved information * in data. Assumes semaphore already acquired. **/ s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000_read_phy_reg_hv(hw, offset, data, true, false); } /** * e1000_read_phy_reg_page_hv - Read HV PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Reads the PHY register at offset and stores the retrieved information * in data. Assumes semaphore already acquired and page already set. 
**/ s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000_read_phy_reg_hv(hw, offset, data, true, true); } /** * __e1000_write_phy_reg_hv - Write HV PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * @locked: semaphore has already been acquired or not * @page_set: BM_WUC_PAGE already set and access enabled * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. **/ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, bool locked, bool page_set) { s32 ret_val; u16 page = BM_PHY_REG_PAGE(offset); u16 reg = BM_PHY_REG_NUM(offset); u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); if (!locked) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; } /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, false, page_set); goto out; } if (page > 0 && page < HV_INTC_FC_PAGE_START) { ret_val = e1000_access_phy_debug_regs_hv(hw, offset, &data, false); goto out; } if (!page_set) { if (page == HV_INTC_FC_PAGE_START) page = 0; /* Workaround MDIO accesses being disabled after entering IEEE * Power Down (when bit 11 of the PHY Control register is set) */ if ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision >= 1) && (hw->phy.addr == 2) && !(MAX_PHY_REG_ADDRESS & reg) && (data & BIT(11))) { u16 data2 = 0x7EFF; ret_val = e1000_access_phy_debug_regs_hv(hw, BIT(6) | 0x3, &data2, false); if (ret_val) goto out; } if (reg > MAX_PHY_MULTI_PAGE_REG) { /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000_set_page_igp(hw, (page << IGP_PAGE_SHIFT)); hw->phy.addr = phy_addr; if (ret_val) goto out; } } e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page, page << IGP_PAGE_SHIFT, reg); ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data); out: if (!locked) hw->phy.ops.release(hw); return ret_val; } /** * e1000_write_phy_reg_hv - Write HV PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore then writes the data to PHY register at the offset. * Release the acquired semaphores before exiting. **/ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000_write_phy_reg_hv(hw, offset, data, false, false); } /** * e1000_write_phy_reg_hv_locked - Write HV PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Writes the data to PHY register at the offset. Assumes semaphore * already acquired. **/ s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000_write_phy_reg_hv(hw, offset, data, true, false); } /** * e1000_write_phy_reg_page_hv - Write HV PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Writes the data to PHY register at the offset. Assumes semaphore * already acquired and page already set. 
**/ s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000_write_phy_reg_hv(hw, offset, data, true, true); } /** * e1000_get_phy_addr_for_hv_page - Get PHY address based on page * @page: page to be accessed **/ static u32 e1000_get_phy_addr_for_hv_page(u32 page) { u32 phy_addr = 2; if (page >= HV_INTC_FC_PAGE_START) phy_addr = 1; return phy_addr; } /** * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers * @hw: pointer to the HW structure * @offset: register offset to be read or written * @data: pointer to the data to be read or written * @read: determines if operation is read or write * * Reads the PHY register at offset and stores the retrieved information * in data. Assumes semaphore already acquired. Note that the procedure * to access these regs uses the address port and data port to read/write. * These accesses done with PHY address 2 and without using pages. **/ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, u16 *data, bool read) { s32 ret_val; u32 addr_reg; u32 data_reg; /* This takes care of the difference with desktop vs mobile phy */ addr_reg = ((hw->phy.type == e1000_phy_82578) ? I82578_ADDR_REG : I82577_ADDR_REG); data_reg = addr_reg + 1; /* All operations in this function are phy address 2 */ hw->phy.addr = 2; /* masking with 0x3F to remove the page from offset */ ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); if (ret_val) { e_dbg("Could not write the Address Offset port register\n"); return ret_val; } /* Read or write the data value next */ if (read) ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data); else ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); if (ret_val) e_dbg("Could not access the Data port register\n"); return ret_val; } /** * e1000_link_stall_workaround_hv - Si workaround * @hw: pointer to the HW structure * * This function works around a Si bug where the link partner can get * a link up indication before the PHY does. If small packets are sent * by the link partner they can be placed in the packet buffer without * being properly accounted for by the PHY and will stall preventing * further packets from being received. The workaround is to clear the * packet buffer after the PHY detects link up. **/ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) { s32 ret_val = 0; u16 data; if (hw->phy.type != e1000_phy_82578) return 0; /* Do not apply workaround if in PHY loopback bit 14 set */ ret_val = e1e_rphy(hw, MII_BMCR, &data); if (ret_val) { e_dbg("Error reading PHY register\n"); return ret_val; } if (data & BMCR_LOOPBACK) return 0; /* check if link is up and at 1Gbps */ ret_val = e1e_rphy(hw, BM_CS_STATUS, &data); if (ret_val) return ret_val; data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_MASK); if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_1000)) return 0; msleep(200); /* flush the packets in the fifo buffer */ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, (HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED)); if (ret_val) return ret_val; return e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC); } /** * e1000_check_polarity_82577 - Checks the polarity. * @hw: pointer to the HW structure * * Success returns 0, Failure returns -E1000_ERR_PHY (-2) * * Polarity is determined based on the PHY specific status register. 
**/ s32 e1000_check_polarity_82577(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); if (!ret_val) phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY) ? e1000_rev_polarity_reversed : e1000_rev_polarity_normal); return ret_val; } /** * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY * @hw: pointer to the HW structure * * Calls the PHY setup function to force speed and duplex. **/ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; bool link; ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); if (ret_val) return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &phy_data); ret_val = e1e_wphy(hw, MII_BMCR, phy_data); if (ret_val) return ret_val; udelay(1); if (phy->autoneg_wait_to_complete) { e_dbg("Waiting for forced speed/duplex link on 82577 phy\n"); ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; if (!link) e_dbg("Link taking longer than expected.\n"); /* Try once more */ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); } return ret_val; } /** * e1000_get_phy_info_82577 - Retrieve I82577 PHY information * @hw: pointer to the HW structure * * Read PHY status to determine if link is up. If link is up, then * set/determine 10base-T extended distance and polarity correction. Read * PHY port status to determine MDI/MDIx and speed. Based on the speed, * determine on the cable length, local and remote receiver. **/ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; bool link; ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) return ret_val; if (!link) { e_dbg("Phy info is only valid if link is up\n"); return -E1000_ERR_CONFIG; } phy->polarity_correction = true; ret_val = e1000_check_polarity_82577(hw); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); if (ret_val) return ret_val; phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX); if ((data & I82577_PHY_STATUS2_SPEED_MASK) == I82577_PHY_STATUS2_SPEED_1000MBPS) { ret_val = hw->phy.ops.get_cable_length(hw); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, MII_STAT1000, &data); if (ret_val) return ret_val; phy->local_rx = (data & LPA_1000LOCALRXOK) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; phy->remote_rx = (data & LPA_1000REMRXOK) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; } else { phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; phy->local_rx = e1000_1000t_rx_status_undefined; phy->remote_rx = e1000_1000t_rx_status_undefined; } return 0; } /** * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY * @hw: pointer to the HW structure * * Reads the diagnostic status register and verifies result is valid before * placing it in the phy_cable_length field. **/ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, length; ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data); if (ret_val) return ret_val; length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >> I82577_DSTATUS_CABLE_LENGTH_SHIFT); if (length == E1000_CABLE_LENGTH_UNDEFINED) return -E1000_ERR_PHY; phy->cable_length = length; return 0; }
linux-master
drivers/net/ethernet/intel/e1000e/phy.c
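The BM/HV helpers in phy.c above pack the PHY page and the register number into a single offset word: the register number sits in the low bits (masked with MAX_PHY_REG_ADDRESS before the MDIC access) and the page in the bits above IGP_PAGE_SHIFT, which is why the page-select write shifts the page left so the PHY sees "page x 32". The standalone C sketch below mirrors that convention; the concrete field widths (a 5-bit register field, an 0x1F mask) and all sketch_* names are assumptions inferred from the driver's "(page x 32)" comment, not values copied from the e1000e headers.

/* Minimal sketch of the paged-offset convention used by the BM/HV PHY
 * register helpers. The field widths below are assumptions inferred from
 * the driver's "(page x 32)" comment, not values from the e1000e headers.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT      5     /* assumed: page select expects page x 32 */
#define SKETCH_MAX_REG_ADDRESS 0x1F  /* assumed: 5-bit register number field */

/* Fold a (page, register) pair into one offset word, as the callers of the
 * BM/HV read/write helpers do.
 */
static uint32_t sketch_phy_offset(uint16_t page, uint16_t reg)
{
	return ((uint32_t)page << SKETCH_PAGE_SHIFT) |
	       (reg & SKETCH_MAX_REG_ADDRESS);
}

int main(void)
{
	/* The wakeup-control register is referred to as 769.17 in phy.c */
	uint32_t offset = sketch_phy_offset(769, 17);
	unsigned int page = offset >> SKETCH_PAGE_SHIFT;
	unsigned int reg = offset & SKETCH_MAX_REG_ADDRESS;

	/* The page-select register is then written with page << shift,
	 * i.e. page x 32, before the plain 5-bit register access. */
	printf("offset=0x%x -> page=%u reg=%u page_select=0x%x\n",
	       (unsigned int)offset, page, reg, page << SKETCH_PAGE_SHIFT);
	return 0;
}

Splitting the offset the other way (offset >> SKETCH_PAGE_SHIFT for the page, offset & SKETCH_MAX_REG_ADDRESS for the register) mirrors how e1000e_write_phy_reg_bm and the __e1000_*_phy_reg_hv helpers decompose the offset before programming the page-select register.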
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ /* PTP 1588 Hardware Clock (PHC) * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb) * Copyright (C) 2011 Richard Cochran <[email protected]> */ #include "e1000.h" #ifdef CONFIG_E1000E_HWTS #include <linux/clocksource.h> #include <linux/ktime.h> #include <asm/tsc.h> #endif /** * e1000e_phc_adjfine - adjust the frequency of the hardware clock * @ptp: ptp clock structure * @delta: Desired frequency chance in scaled parts per million * * Adjust the frequency of the PHC cycle counter by the indicated delta from * the base frequency. * * Scaled parts per million is ppm but with a 16 bit binary fractional field. **/ static int e1000e_phc_adjfine(struct ptp_clock_info *ptp, long delta) { struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, ptp_clock_info); struct e1000_hw *hw = &adapter->hw; unsigned long flags; u64 incvalue; u32 timinca; s32 ret_val; /* Get the System Time Register SYSTIM base frequency */ ret_val = e1000e_get_base_timinca(adapter, &timinca); if (ret_val) return ret_val; spin_lock_irqsave(&adapter->systim_lock, flags); incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK; incvalue = adjust_by_scaled_ppm(incvalue, delta); timinca &= ~E1000_TIMINCA_INCVALUE_MASK; timinca |= incvalue; ew32(TIMINCA, timinca); adapter->ptp_delta = delta; spin_unlock_irqrestore(&adapter->systim_lock, flags); return 0; } /** * e1000e_phc_adjtime - Shift the time of the hardware clock * @ptp: ptp clock structure * @delta: Desired change in nanoseconds * * Adjust the timer by resetting the timecounter structure. **/ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, ptp_clock_info); unsigned long flags; spin_lock_irqsave(&adapter->systim_lock, flags); timecounter_adjtime(&adapter->tc, delta); spin_unlock_irqrestore(&adapter->systim_lock, flags); return 0; } #ifdef CONFIG_E1000E_HWTS #define MAX_HW_WAIT_COUNT (3) /** * e1000e_phc_get_syncdevicetime - Callback given to timekeeping code reads system/device registers * @device: current device time * @system: system counter value read synchronously with device time * @ctx: context provided by timekeeping code * * Read device and system (ART) clock simultaneously and return the corrected * clock values in ns. 
**/ static int e1000e_phc_get_syncdevicetime(ktime_t *device, struct system_counterval_t *system, void *ctx) { struct e1000_adapter *adapter = (struct e1000_adapter *)ctx; struct e1000_hw *hw = &adapter->hw; unsigned long flags; int i; u32 tsync_ctrl; u64 dev_cycles; u64 sys_cycles; tsync_ctrl = er32(TSYNCTXCTL); tsync_ctrl |= E1000_TSYNCTXCTL_START_SYNC | E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK; ew32(TSYNCTXCTL, tsync_ctrl); for (i = 0; i < MAX_HW_WAIT_COUNT; ++i) { udelay(1); tsync_ctrl = er32(TSYNCTXCTL); if (tsync_ctrl & E1000_TSYNCTXCTL_SYNC_COMP) break; } if (i == MAX_HW_WAIT_COUNT) return -ETIMEDOUT; dev_cycles = er32(SYSSTMPH); dev_cycles <<= 32; dev_cycles |= er32(SYSSTMPL); spin_lock_irqsave(&adapter->systim_lock, flags); *device = ns_to_ktime(timecounter_cyc2time(&adapter->tc, dev_cycles)); spin_unlock_irqrestore(&adapter->systim_lock, flags); sys_cycles = er32(PLTSTMPH); sys_cycles <<= 32; sys_cycles |= er32(PLTSTMPL); *system = convert_art_to_tsc(sys_cycles); return 0; } /** * e1000e_phc_getcrosststamp - Reads the current system/device cross timestamp * @ptp: ptp clock structure * @xtstamp: structure containing timestamp * * Read device and system (ART) clock simultaneously and return the scaled * clock values in ns. **/ static int e1000e_phc_getcrosststamp(struct ptp_clock_info *ptp, struct system_device_crosststamp *xtstamp) { struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, ptp_clock_info); return get_device_system_crosststamp(e1000e_phc_get_syncdevicetime, adapter, NULL, xtstamp); } #endif/*CONFIG_E1000E_HWTS*/ /** * e1000e_phc_gettimex - Reads the current time from the hardware clock and * system clock * @ptp: ptp clock structure * @ts: timespec structure to hold the current PHC time * @sts: structure to hold the current system time * * Read the timecounter and return the correct value in ns after converting * it into a struct timespec. **/ static int e1000e_phc_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, struct ptp_system_timestamp *sts) { struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, ptp_clock_info); unsigned long flags; u64 cycles, ns; spin_lock_irqsave(&adapter->systim_lock, flags); /* NOTE: Non-monotonic SYSTIM readings may be returned */ cycles = e1000e_read_systim(adapter, sts); ns = timecounter_cyc2time(&adapter->tc, cycles); spin_unlock_irqrestore(&adapter->systim_lock, flags); *ts = ns_to_timespec64(ns); return 0; } /** * e1000e_phc_settime - Set the current time on the hardware clock * @ptp: ptp clock structure * @ts: timespec containing the new time for the cycle counter * * Reset the timecounter to use a new base value instead of the kernel * wall timer value. **/ static int e1000e_phc_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts) { struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, ptp_clock_info); unsigned long flags; u64 ns; ns = timespec64_to_ns(ts); /* reset the timecounter */ spin_lock_irqsave(&adapter->systim_lock, flags); timecounter_init(&adapter->tc, &adapter->cc, ns); spin_unlock_irqrestore(&adapter->systim_lock, flags); return 0; } /** * e1000e_phc_enable - enable or disable an ancillary feature * @ptp: ptp clock structure * @request: Desired resource to enable or disable * @on: Caller passes one to enable or zero to disable * * Enable (or disable) ancillary features of the PHC subsystem. * Currently, no ancillary features are supported. 
**/ static int e1000e_phc_enable(struct ptp_clock_info __always_unused *ptp, struct ptp_clock_request __always_unused *request, int __always_unused on) { return -EOPNOTSUPP; } static void e1000e_systim_overflow_work(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, systim_overflow_work.work); struct e1000_hw *hw = &adapter->hw; struct timespec64 ts; u64 ns; /* Update the timecounter */ ns = timecounter_read(&adapter->tc); ts = ns_to_timespec64(ns); e_dbg("SYSTIM overflow check at %lld.%09lu\n", (long long) ts.tv_sec, ts.tv_nsec); schedule_delayed_work(&adapter->systim_overflow_work, E1000_SYSTIM_OVERFLOW_PERIOD); } static const struct ptp_clock_info e1000e_ptp_clock_info = { .owner = THIS_MODULE, .n_alarm = 0, .n_ext_ts = 0, .n_per_out = 0, .n_pins = 0, .pps = 0, .adjfine = e1000e_phc_adjfine, .adjtime = e1000e_phc_adjtime, .gettimex64 = e1000e_phc_gettimex, .settime64 = e1000e_phc_settime, .enable = e1000e_phc_enable, }; /** * e1000e_ptp_init - initialize PTP for devices which support it * @adapter: board private structure * * This function performs the required steps for enabling PTP support. * If PTP support has already been loaded it simply calls the cyclecounter * init routine and exits. **/ void e1000e_ptp_init(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; adapter->ptp_clock = NULL; if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) return; adapter->ptp_clock_info = e1000e_ptp_clock_info; snprintf(adapter->ptp_clock_info.name, sizeof(adapter->ptp_clock_info.name), "%pm", adapter->netdev->perm_addr); switch (hw->mac.type) { case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: if ((hw->mac.type < e1000_pch_lpt) || (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { adapter->ptp_clock_info.max_adj = 24000000 - 1; break; } fallthrough; case e1000_82574: case e1000_82583: adapter->ptp_clock_info.max_adj = 600000000 - 1; break; default: break; } #ifdef CONFIG_E1000E_HWTS /* CPU must have ART and GBe must be from Sunrise Point or greater */ if (hw->mac.type >= e1000_pch_spt && boot_cpu_has(X86_FEATURE_ART)) adapter->ptp_clock_info.getcrosststamp = e1000e_phc_getcrosststamp; #endif/*CONFIG_E1000E_HWTS*/ INIT_DELAYED_WORK(&adapter->systim_overflow_work, e1000e_systim_overflow_work); schedule_delayed_work(&adapter->systim_overflow_work, E1000_SYSTIM_OVERFLOW_PERIOD); adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info, &adapter->pdev->dev); if (IS_ERR(adapter->ptp_clock)) { adapter->ptp_clock = NULL; e_err("ptp_clock_register failed\n"); } else if (adapter->ptp_clock) { e_info("registered PHC clock\n"); } } /** * e1000e_ptp_remove - disable PTP device and stop the overflow check * @adapter: board private structure * * Stop the PTP support, and cancel the delayed work. **/ void e1000e_ptp_remove(struct e1000_adapter *adapter) { if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) return; cancel_delayed_work_sync(&adapter->systim_overflow_work); if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); adapter->ptp_clock = NULL; e_info("removed PHC\n"); } }
linux-master
drivers/net/ethernet/intel/e1000e/ptp.c
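In ptp.c above, e1000e_phc_adjfine scales the TIMINCA increment by a "scaled ppm" delta, i.e. parts per million carrying a 16-bit binary fractional field, via the kernel's adjust_by_scaled_ppm() helper. The sketch below is a user-space approximation of that arithmetic (base + base * scaled_ppm / (1,000,000 * 2^16)); it does not reproduce whatever rounding the in-kernel helper applies, it relies on a GCC/Clang 128-bit integer extension, and the sketch_* names and the sample increment value are illustrative only.

/* User-space approximation of the scaled-ppm frequency adjustment applied
 * to the increment value in e1000e_phc_adjfine(). "Scaled ppm" is ppm with
 * a 16-bit binary fractional part, so the correction factor is
 * scaled_ppm / (1,000,000 * 65,536). Rounding behaviour of the real kernel
 * helper is not reproduced.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t sketch_adjust_by_scaled_ppm(uint64_t base, long scaled_ppm)
{
	/* 128-bit intermediate (GCC/Clang extension) avoids overflowing
	 * base * |scaled_ppm| before the division. */
	unsigned __int128 diff =
		(unsigned __int128)base * (uint64_t)labs(scaled_ppm);

	diff /= 1000000ULL << 16;

	return scaled_ppm < 0 ? base - (uint64_t)diff : base + (uint64_t)diff;
}

int main(void)
{
	uint64_t incvalue = 0x18000000;  /* hypothetical TIMINCA increment */
	long scaled_ppm = 32768;         /* +0.5 ppm expressed as 0.5 * 2^16 */

	printf("incvalue %llu -> %llu\n",
	       (unsigned long long)incvalue,
	       (unsigned long long)sketch_adjust_by_scaled_ppm(incvalue, scaled_ppm));
	return 0;
}

The driver then masks the adjusted value back into the INCVALUE field of TIMINCA under systim_lock before writing the register, as e1000e_phc_adjfine shows above.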
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ /* 82571EB Gigabit Ethernet Controller * 82571EB Gigabit Ethernet Controller (Copper) * 82571EB Gigabit Ethernet Controller (Fiber) * 82571EB Dual Port Gigabit Mezzanine Adapter * 82571EB Quad Port Gigabit Mezzanine Adapter * 82571PT Gigabit PT Quad Port Server ExpressModule * 82572EI Gigabit Ethernet Controller (Copper) * 82572EI Gigabit Ethernet Controller (Fiber) * 82572EI Gigabit Ethernet Controller * 82573V Gigabit Ethernet Controller (Copper) * 82573E Gigabit Ethernet Controller (Copper) * 82573L Gigabit Ethernet Controller * 82574L Gigabit Network Connection * 82583V Gigabit Network Connection */ #include "e1000.h" static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw); static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); static bool e1000_check_mng_mode_82574(struct e1000_hw *hw); static s32 e1000_led_on_82574(struct e1000_hw *hw); static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active); static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active); /** * e1000_init_phy_params_82571 - Init PHY func ptrs. * @hw: pointer to the HW structure **/ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; if (hw->phy.media_type != e1000_media_type_copper) { phy->type = e1000_phy_none; return 0; } phy->addr = 1; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->reset_delay_us = 100; phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_82571; switch (hw->mac.type) { case e1000_82571: case e1000_82572: phy->type = e1000_phy_igp_2; break; case e1000_82573: phy->type = e1000_phy_m88; break; case e1000_82574: case e1000_82583: phy->type = e1000_phy_bm; phy->ops.acquire = e1000_get_hw_semaphore_82574; phy->ops.release = e1000_put_hw_semaphore_82574; phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574; break; default: return -E1000_ERR_PHY; } /* This can only be done after all function pointers are setup. */ ret_val = e1000_get_phy_id_82571(hw); if (ret_val) { e_dbg("Error getting PHY ID\n"); return ret_val; } /* Verify phy id */ switch (hw->mac.type) { case e1000_82571: case e1000_82572: if (phy->id != IGP01E1000_I_PHY_ID) ret_val = -E1000_ERR_PHY; break; case e1000_82573: if (phy->id != M88E1111_I_PHY_ID) ret_val = -E1000_ERR_PHY; break; case e1000_82574: case e1000_82583: if (phy->id != BME1000_E_PHY_ID_R2) ret_val = -E1000_ERR_PHY; break; default: ret_val = -E1000_ERR_PHY; break; } if (ret_val) e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id); return ret_val; } /** * e1000_init_nvm_params_82571 - Init NVM func ptrs. 
* @hw: pointer to the HW structure **/ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; u32 eecd = er32(EECD); u16 size; nvm->opcode_bits = 8; nvm->delay_usec = 1; switch (nvm->override) { case e1000_nvm_override_spi_large: nvm->page_size = 32; nvm->address_bits = 16; break; case e1000_nvm_override_spi_small: nvm->page_size = 8; nvm->address_bits = 8; break; default: nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; break; } switch (hw->mac.type) { case e1000_82573: case e1000_82574: case e1000_82583: if (((eecd >> 15) & 0x3) == 0x3) { nvm->type = e1000_nvm_flash_hw; nvm->word_size = 2048; /* Autonomous Flash update bit must be cleared due * to Flash update issue. */ eecd &= ~E1000_EECD_AUPDEN; ew32(EECD, eecd); break; } fallthrough; default: nvm->type = e1000_nvm_eeprom_spi; size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> E1000_EECD_SIZE_EX_SHIFT); /* Added to a constant, "size" becomes the left-shift value * for setting word_size. */ size += NVM_WORD_SIZE_BASE_SHIFT; /* EEPROM access above 16k is unsupported */ if (size > 14) size = 14; nvm->word_size = BIT(size); break; } /* Function Pointers */ switch (hw->mac.type) { case e1000_82574: case e1000_82583: nvm->ops.acquire = e1000_get_hw_semaphore_82574; nvm->ops.release = e1000_put_hw_semaphore_82574; break; default: break; } return 0; } /** * e1000_init_mac_params_82571 - Init MAC func ptrs. * @hw: pointer to the HW structure **/ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 swsm = 0; u32 swsm2 = 0; bool force_clear_smbi = false; /* Set media type and media-dependent function pointers */ switch (hw->adapter->pdev->device) { case E1000_DEV_ID_82571EB_FIBER: case E1000_DEV_ID_82572EI_FIBER: case E1000_DEV_ID_82571EB_QUAD_FIBER: hw->phy.media_type = e1000_media_type_fiber; mac->ops.setup_physical_interface = e1000_setup_fiber_serdes_link_82571; mac->ops.check_for_link = e1000e_check_for_fiber_link; mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes; break; case E1000_DEV_ID_82571EB_SERDES: case E1000_DEV_ID_82571EB_SERDES_DUAL: case E1000_DEV_ID_82571EB_SERDES_QUAD: case E1000_DEV_ID_82572EI_SERDES: hw->phy.media_type = e1000_media_type_internal_serdes; mac->ops.setup_physical_interface = e1000_setup_fiber_serdes_link_82571; mac->ops.check_for_link = e1000_check_for_serdes_link_82571; mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes; break; default: hw->phy.media_type = e1000_media_type_copper; mac->ops.setup_physical_interface = e1000_setup_copper_link_82571; mac->ops.check_for_link = e1000e_check_for_copper_link; mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_copper; break; } /* Set mta register count */ mac->mta_reg_count = 128; /* Set rar entry count */ mac->rar_entry_count = E1000_RAR_ENTRIES; /* Adaptive IFS supported */ mac->adaptive_ifs = true; /* MAC-specific function pointers */ switch (hw->mac.type) { case e1000_82573: mac->ops.set_lan_id = e1000_set_lan_id_single_port; mac->ops.check_mng_mode = e1000e_check_mng_mode_generic; mac->ops.led_on = e1000e_led_on_generic; mac->ops.blink_led = e1000e_blink_led_generic; /* FWSM register */ mac->has_fwsm = true; /* ARC supported; valid only if manageability features are * enabled. 
*/ mac->arc_subsystem_valid = !!(er32(FWSM) & E1000_FWSM_MODE_MASK); break; case e1000_82574: case e1000_82583: mac->ops.set_lan_id = e1000_set_lan_id_single_port; mac->ops.check_mng_mode = e1000_check_mng_mode_82574; mac->ops.led_on = e1000_led_on_82574; break; default: mac->ops.check_mng_mode = e1000e_check_mng_mode_generic; mac->ops.led_on = e1000e_led_on_generic; mac->ops.blink_led = e1000e_blink_led_generic; /* FWSM register */ mac->has_fwsm = true; break; } /* Ensure that the inter-port SWSM.SMBI lock bit is clear before * first NVM or PHY access. This should be done for single-port * devices, and for one port only on dual-port devices so that * for those devices we can still use the SMBI lock to synchronize * inter-port accesses to the PHY & NVM. */ switch (hw->mac.type) { case e1000_82571: case e1000_82572: swsm2 = er32(SWSM2); if (!(swsm2 & E1000_SWSM2_LOCK)) { /* Only do this for the first interface on this card */ ew32(SWSM2, swsm2 | E1000_SWSM2_LOCK); force_clear_smbi = true; } else { force_clear_smbi = false; } break; default: force_clear_smbi = true; break; } if (force_clear_smbi) { /* Make sure SWSM.SMBI is clear */ swsm = er32(SWSM); if (swsm & E1000_SWSM_SMBI) { /* This bit should not be set on a first interface, and * indicates that the bootagent or EFI code has * improperly left this bit enabled */ e_dbg("Please update your 82571 Bootagent\n"); } ew32(SWSM, swsm & ~E1000_SWSM_SMBI); } /* Initialize device specific counter of SMBI acquisition timeouts. */ hw->dev_spec.e82571.smb_counter = 0; return 0; } static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; static int global_quad_port_a; /* global port a indication */ struct pci_dev *pdev = adapter->pdev; int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; s32 rc; rc = e1000_init_mac_params_82571(hw); if (rc) return rc; rc = e1000_init_nvm_params_82571(hw); if (rc) return rc; rc = e1000_init_phy_params_82571(hw); if (rc) return rc; /* tag quad port adapters first, it's used below */ switch (pdev->device) { case E1000_DEV_ID_82571EB_QUAD_COPPER: case E1000_DEV_ID_82571EB_QUAD_FIBER: case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: case E1000_DEV_ID_82571PT_QUAD_COPPER: adapter->flags |= FLAG_IS_QUAD_PORT; /* mark the first port */ if (global_quad_port_a == 0) adapter->flags |= FLAG_IS_QUAD_PORT_A; /* Reset for multiple quad port adapters */ global_quad_port_a++; if (global_quad_port_a == 4) global_quad_port_a = 0; break; default: break; } switch (adapter->hw.mac.type) { case e1000_82571: /* these dual ports don't have WoL on port B at all */ if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) || (pdev->device == E1000_DEV_ID_82571EB_SERDES) || (pdev->device == E1000_DEV_ID_82571EB_COPPER)) && (is_port_b)) adapter->flags &= ~FLAG_HAS_WOL; /* quad ports only support WoL on port A */ if (adapter->flags & FLAG_IS_QUAD_PORT && (!(adapter->flags & FLAG_IS_QUAD_PORT_A))) adapter->flags &= ~FLAG_HAS_WOL; /* Does not support WoL on any port */ if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) adapter->flags &= ~FLAG_HAS_WOL; break; case e1000_82573: if (pdev->device == E1000_DEV_ID_82573L) { adapter->flags |= FLAG_HAS_JUMBO_FRAMES; adapter->max_hw_frame_size = DEFAULT_JUMBO; } break; default: break; } return 0; } /** * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision * @hw: pointer to the HW structure * * Reads the PHY registers and stores the PHY ID and possibly the PHY * revision in the hardware structure. 
**/ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_id = 0; switch (hw->mac.type) { case e1000_82571: case e1000_82572: /* The 82571 firmware may still be configuring the PHY. * In this case, we cannot access the PHY until the * configuration is done. So we explicitly set the * PHY ID. */ phy->id = IGP01E1000_I_PHY_ID; break; case e1000_82573: return e1000e_get_phy_id(hw); case e1000_82574: case e1000_82583: ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id); if (ret_val) return ret_val; phy->id = (u32)(phy_id << 16); usleep_range(20, 40); ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); if (ret_val) return ret_val; phy->id |= (u32)(phy_id); phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); break; default: return -E1000_ERR_PHY; } return 0; } /** * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore * @hw: pointer to the HW structure * * Acquire the HW semaphore to access the PHY or NVM **/ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) { u32 swsm; s32 sw_timeout = hw->nvm.word_size + 1; s32 fw_timeout = hw->nvm.word_size + 1; s32 i = 0; /* If we have timedout 3 times on trying to acquire * the inter-port SMBI semaphore, there is old code * operating on the other port, and it is not * releasing SMBI. Modify the number of times that * we try for the semaphore to interwork with this * older code. */ if (hw->dev_spec.e82571.smb_counter > 2) sw_timeout = 1; /* Get the SW semaphore */ while (i < sw_timeout) { swsm = er32(SWSM); if (!(swsm & E1000_SWSM_SMBI)) break; usleep_range(50, 100); i++; } if (i == sw_timeout) { e_dbg("Driver can't access device - SMBI bit is set.\n"); hw->dev_spec.e82571.smb_counter++; } /* Get the FW semaphore. */ for (i = 0; i < fw_timeout; i++) { swsm = er32(SWSM); ew32(SWSM, swsm | E1000_SWSM_SWESMBI); /* Semaphore acquired if bit latched */ if (er32(SWSM) & E1000_SWSM_SWESMBI) break; usleep_range(50, 100); } if (i == fw_timeout) { /* Release semaphores */ e1000_put_hw_semaphore_82571(hw); e_dbg("Driver can't access the NVM\n"); return -E1000_ERR_NVM; } return 0; } /** * e1000_put_hw_semaphore_82571 - Release hardware semaphore * @hw: pointer to the HW structure * * Release hardware semaphore used to access the PHY or NVM **/ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw) { u32 swsm; swsm = er32(SWSM); swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); ew32(SWSM, swsm); } /** * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore * @hw: pointer to the HW structure * * Acquire the HW semaphore during reset. * **/ static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw) { u32 extcnf_ctrl; s32 i = 0; extcnf_ctrl = er32(EXTCNF_CTRL); do { extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; ew32(EXTCNF_CTRL, extcnf_ctrl); extcnf_ctrl = er32(EXTCNF_CTRL); if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) break; usleep_range(2000, 4000); i++; } while (i < MDIO_OWNERSHIP_TIMEOUT); if (i == MDIO_OWNERSHIP_TIMEOUT) { /* Release semaphores */ e1000_put_hw_semaphore_82573(hw); e_dbg("Driver can't access the PHY\n"); return -E1000_ERR_PHY; } return 0; } /** * e1000_put_hw_semaphore_82573 - Release hardware semaphore * @hw: pointer to the HW structure * * Release hardware semaphore used during reset. 
* **/ static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw) { u32 extcnf_ctrl; extcnf_ctrl = er32(EXTCNF_CTRL); extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; ew32(EXTCNF_CTRL, extcnf_ctrl); } static DEFINE_MUTEX(swflag_mutex); /** * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore * @hw: pointer to the HW structure * * Acquire the HW semaphore to access the PHY or NVM. * **/ static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw) { s32 ret_val; mutex_lock(&swflag_mutex); ret_val = e1000_get_hw_semaphore_82573(hw); if (ret_val) mutex_unlock(&swflag_mutex); return ret_val; } /** * e1000_put_hw_semaphore_82574 - Release hardware semaphore * @hw: pointer to the HW structure * * Release hardware semaphore used to access the PHY or NVM * **/ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw) { e1000_put_hw_semaphore_82573(hw); mutex_unlock(&swflag_mutex); } /** * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state * @hw: pointer to the HW structure * @active: true to enable LPLU, false to disable * * Sets the LPLU D0 state according to the active flag. * LPLU will not be activated unless the * device autonegotiation advertisement meets standards of * either 10 or 10/100 or 10/100/1000 at all duplexes. * This is a function pointer entry point only called by * PHY setup routines. **/ static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) { u32 data = er32(POEMB); if (active) data |= E1000_PHY_CTRL_D0A_LPLU; else data &= ~E1000_PHY_CTRL_D0A_LPLU; ew32(POEMB, data); return 0; } /** * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3 * @hw: pointer to the HW structure * @active: boolean used to enable/disable lplu * * The low power link up (lplu) state is set to the power management level D3 * when active is true, else clear lplu for D3. LPLU * is used during Dx states where the power conservation is most important. * During driver activity, SmartSpeed should be enabled so performance is * maintained. **/ static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) { u32 data = er32(POEMB); if (!active) { data &= ~E1000_PHY_CTRL_NOND0A_LPLU; } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) || (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) { data |= E1000_PHY_CTRL_NOND0A_LPLU; } ew32(POEMB, data); return 0; } /** * e1000_acquire_nvm_82571 - Request for access to the EEPROM * @hw: pointer to the HW structure * * To gain access to the EEPROM, first we must obtain a hardware semaphore. * Then for non-82573 hardware, set the EEPROM access request bit and wait * for EEPROM access grant bit. If the access grant bit is not set, release * hardware semaphore. **/ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw) { s32 ret_val; ret_val = e1000_get_hw_semaphore_82571(hw); if (ret_val) return ret_val; switch (hw->mac.type) { case e1000_82573: break; default: ret_val = e1000e_acquire_nvm(hw); break; } if (ret_val) e1000_put_hw_semaphore_82571(hw); return ret_val; } /** * e1000_release_nvm_82571 - Release exclusive access to EEPROM * @hw: pointer to the HW structure * * Stop any current commands to the EEPROM and clear the EEPROM request bit. 
**/ static void e1000_release_nvm_82571(struct e1000_hw *hw) { e1000e_release_nvm(hw); e1000_put_hw_semaphore_82571(hw); } /** * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface * @hw: pointer to the HW structure * @offset: offset within the EEPROM to be written to * @words: number of words to write * @data: 16 bit word(s) to be written to the EEPROM * * For non-82573 silicon, write data to EEPROM at offset using SPI interface. * * If e1000e_update_nvm_checksum is not called after this function, the * EEPROM will most likely contain an invalid checksum. **/ static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { s32 ret_val; switch (hw->mac.type) { case e1000_82573: case e1000_82574: case e1000_82583: ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data); break; case e1000_82571: case e1000_82572: ret_val = e1000e_write_nvm_spi(hw, offset, words, data); break; default: ret_val = -E1000_ERR_NVM; break; } return ret_val; } /** * e1000_update_nvm_checksum_82571 - Update EEPROM checksum * @hw: pointer to the HW structure * * Updates the EEPROM checksum by reading/adding each word of the EEPROM * up to the checksum. Then calculates the EEPROM checksum and writes the * value to the EEPROM. **/ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) { u32 eecd; s32 ret_val; u16 i; ret_val = e1000e_update_nvm_checksum_generic(hw); if (ret_val) return ret_val; /* If our nvm is an EEPROM, then we're done * otherwise, commit the checksum to the flash NVM. */ if (hw->nvm.type != e1000_nvm_flash_hw) return 0; /* Check for pending operations. */ for (i = 0; i < E1000_FLASH_UPDATES; i++) { usleep_range(1000, 2000); if (!(er32(EECD) & E1000_EECD_FLUPD)) break; } if (i == E1000_FLASH_UPDATES) return -E1000_ERR_NVM; /* Reset the firmware if using STM opcode. */ if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { /* The enabling of and the actual reset must be done * in two write cycles. */ ew32(HICR, E1000_HICR_FW_RESET_ENABLE); e1e_flush(); ew32(HICR, E1000_HICR_FW_RESET); } /* Commit the write to flash */ eecd = er32(EECD) | E1000_EECD_FLUPD; ew32(EECD, eecd); for (i = 0; i < E1000_FLASH_UPDATES; i++) { usleep_range(1000, 2000); if (!(er32(EECD) & E1000_EECD_FLUPD)) break; } if (i == E1000_FLASH_UPDATES) return -E1000_ERR_NVM; return 0; } /** * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum * @hw: pointer to the HW structure * * Calculates the EEPROM checksum by reading/adding each word of the EEPROM * and then verifies that the sum of the EEPROM is equal to 0xBABA. **/ static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw) { if (hw->nvm.type == e1000_nvm_flash_hw) e1000_fix_nvm_checksum_82571(hw); return e1000e_validate_nvm_checksum_generic(hw); } /** * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon * @hw: pointer to the HW structure * @offset: offset within the EEPROM to be written to * @words: number of words to write * @data: 16 bit word(s) to be written to the EEPROM * * After checking for invalid values, poll the EEPROM to ensure the previous * command has completed before trying to write the next word. After write * poll for completion. * * If e1000e_update_nvm_checksum is not called after this function, the * EEPROM will most likely contain an invalid checksum. 
**/ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; u32 i, eewr = 0; s32 ret_val = 0; /* A check for invalid values: offset too large, too many words, * and not enough words. */ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { e_dbg("nvm parameter(s) out of bounds\n"); return -E1000_ERR_NVM; } for (i = 0; i < words; i++) { eewr = ((data[i] << E1000_NVM_RW_REG_DATA) | ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) | E1000_NVM_RW_REG_START); ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); if (ret_val) break; ew32(EEWR, eewr); ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); if (ret_val) break; } return ret_val; } /** * e1000_get_cfg_done_82571 - Poll for configuration done * @hw: pointer to the HW structure * * Reads the management control register for the config done bit to be set. **/ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) { s32 timeout = PHY_CFG_TIMEOUT; while (timeout) { if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0) break; usleep_range(1000, 2000); timeout--; } if (!timeout) { e_dbg("MNG configuration cycle has not completed.\n"); return -E1000_ERR_RESET; } return 0; } /** * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state * @hw: pointer to the HW structure * @active: true to enable LPLU, false to disable * * Sets the LPLU D0 state according to the active flag. When activating LPLU * this function also disables smart speed and vice versa. LPLU will not be * activated unless the device autonegotiation advertisement meets standards * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function * pointer entry point only called by PHY setup routines. **/ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); if (ret_val) return ret_val; if (active) { data |= IGP02E1000_PM_D0_LPLU; ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); if (ret_val) return ret_val; /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } else { data &= ~IGP02E1000_PM_D0_LPLU; ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); if (ret_val) return ret_val; /* LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. */ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } } return 0; } /** * e1000_reset_hw_82571 - Reset hardware * @hw: pointer to the HW structure * * This resets the hardware into a known state. 
**/ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) { u32 ctrl, ctrl_ext, eecd, tctl; s32 ret_val; /* Prevent the PCI-E bus from sticking if there is no TLP connection * on the last TLP read/write transaction when MAC is reset. */ ret_val = e1000e_disable_pcie_master(hw); if (ret_val) e_dbg("PCI-E Master disable polling has failed.\n"); e_dbg("Masking off all interrupts\n"); ew32(IMC, 0xffffffff); ew32(RCTL, 0); tctl = er32(TCTL); tctl &= ~E1000_TCTL_EN; ew32(TCTL, tctl); e1e_flush(); usleep_range(10000, 11000); /* Must acquire the MDIO ownership before MAC reset. * Ownership defaults to firmware after a reset. */ switch (hw->mac.type) { case e1000_82573: ret_val = e1000_get_hw_semaphore_82573(hw); break; case e1000_82574: case e1000_82583: ret_val = e1000_get_hw_semaphore_82574(hw); break; default: break; } ctrl = er32(CTRL); e_dbg("Issuing a global reset to MAC\n"); ew32(CTRL, ctrl | E1000_CTRL_RST); /* Must release MDIO ownership and mutex after MAC reset. */ switch (hw->mac.type) { case e1000_82573: /* Release mutex only if the hw semaphore is acquired */ if (!ret_val) e1000_put_hw_semaphore_82573(hw); break; case e1000_82574: case e1000_82583: /* Release mutex only if the hw semaphore is acquired */ if (!ret_val) e1000_put_hw_semaphore_82574(hw); break; default: break; } if (hw->nvm.type == e1000_nvm_flash_hw) { usleep_range(10, 20); ctrl_ext = er32(CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_EE_RST; ew32(CTRL_EXT, ctrl_ext); e1e_flush(); } ret_val = e1000e_get_auto_rd_done(hw); if (ret_val) /* We don't want to continue accessing MAC registers. */ return ret_val; /* Phy configuration from NVM just starts after EECD_AUTO_RD is set. * Need to wait for Phy configuration completion before accessing * NVM and Phy. */ switch (hw->mac.type) { case e1000_82571: case e1000_82572: /* REQ and GNT bits need to be cleared when using AUTO_RD * to access the EEPROM. */ eecd = er32(EECD); eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT); ew32(EECD, eecd); break; case e1000_82573: case e1000_82574: case e1000_82583: msleep(25); break; default: break; } /* Clear any pending interrupt events. */ ew32(IMC, 0xffffffff); er32(ICR); if (hw->mac.type == e1000_82571) { /* Install any alternate MAC address into RAR0 */ ret_val = e1000_check_alt_mac_addr_generic(hw); if (ret_val) return ret_val; e1000e_set_laa_state_82571(hw, true); } /* Reinitialize the 82571 serdes link state machine */ if (hw->phy.media_type == e1000_media_type_internal_serdes) hw->mac.serdes_link_state = e1000_serdes_link_down; return 0; } /** * e1000_init_hw_82571 - Initialize hardware * @hw: pointer to the HW structure * * This inits the hardware readying it for operation. **/ static s32 e1000_init_hw_82571(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 reg_data; s32 ret_val; u16 i, rar_count = mac->rar_entry_count; e1000_initialize_hw_bits_82571(hw); /* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); /* An error is not fatal and we should not stop init due to this */ if (ret_val) e_dbg("Error initializing identification LED\n"); /* Disabling VLAN filtering */ e_dbg("Initializing the IEEE VLAN\n"); mac->ops.clear_vfta(hw); /* Setup the receive address. * If, however, a locally administered address was assigned to the * 82571, we must reserve a RAR for it to work around an issue where * resetting one port will reload the MAC on the other port. 
*/ if (e1000e_get_laa_state_82571(hw)) rar_count--; e1000e_init_rx_addrs(hw, rar_count); /* Zero out the Multicast HASH table */ e_dbg("Zeroing the MTA\n"); for (i = 0; i < mac->mta_reg_count; i++) E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); /* Setup link and flow control */ ret_val = mac->ops.setup_link(hw); /* Set the transmit descriptor write-back policy */ reg_data = er32(TXDCTL(0)); reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); ew32(TXDCTL(0), reg_data); /* ...for both queues. */ switch (mac->type) { case e1000_82573: e1000e_enable_tx_pkt_filtering(hw); fallthrough; case e1000_82574: case e1000_82583: reg_data = er32(GCR); reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; ew32(GCR, reg_data); break; default: reg_data = er32(TXDCTL(1)); reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); ew32(TXDCTL(1), reg_data); break; } /* Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there * is no link. */ e1000_clear_hw_cntrs_82571(hw); return ret_val; } /** * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits * @hw: pointer to the HW structure * * Initializes required hardware-dependent bits needed for normal operation. **/ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) { u32 reg; /* Transmit Descriptor Control 0 */ reg = er32(TXDCTL(0)); reg |= BIT(22); ew32(TXDCTL(0), reg); /* Transmit Descriptor Control 1 */ reg = er32(TXDCTL(1)); reg |= BIT(22); ew32(TXDCTL(1), reg); /* Transmit Arbitration Control 0 */ reg = er32(TARC(0)); reg &= ~(0xF << 27); /* 30:27 */ switch (hw->mac.type) { case e1000_82571: case e1000_82572: reg |= BIT(23) | BIT(24) | BIT(25) | BIT(26); break; case e1000_82574: case e1000_82583: reg |= BIT(26); break; default: break; } ew32(TARC(0), reg); /* Transmit Arbitration Control 1 */ reg = er32(TARC(1)); switch (hw->mac.type) { case e1000_82571: case e1000_82572: reg &= ~(BIT(29) | BIT(30)); reg |= BIT(22) | BIT(24) | BIT(25) | BIT(26); if (er32(TCTL) & E1000_TCTL_MULR) reg &= ~BIT(28); else reg |= BIT(28); ew32(TARC(1), reg); break; default: break; } /* Device Control */ switch (hw->mac.type) { case e1000_82573: case e1000_82574: case e1000_82583: reg = er32(CTRL); reg &= ~BIT(29); ew32(CTRL, reg); break; default: break; } /* Extended Device Control */ switch (hw->mac.type) { case e1000_82573: case e1000_82574: case e1000_82583: reg = er32(CTRL_EXT); reg &= ~BIT(23); reg |= BIT(22); ew32(CTRL_EXT, reg); break; default: break; } if (hw->mac.type == e1000_82571) { reg = er32(PBA_ECC); reg |= E1000_PBA_ECC_CORR_EN; ew32(PBA_ECC, reg); } /* Workaround for hardware errata. * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572 */ if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) { reg = er32(CTRL_EXT); reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN; ew32(CTRL_EXT, reg); } /* Disable IPv6 extension header parsing because some malformed * IPv6 headers can hang the Rx. */ if (hw->mac.type <= e1000_82573) { reg = er32(RFCTL); reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); ew32(RFCTL, reg); } /* PCI-Ex Control Registers */ switch (hw->mac.type) { case e1000_82574: case e1000_82583: reg = er32(GCR); reg |= BIT(22); ew32(GCR, reg); /* Workaround for hardware errata. 
* Apply the fix documented in the errata docs: without it, some * error-prone or unreliable PCIe completions can occur, particularly * with ASPM enabled, which can cause Tx timeouts. */ reg = er32(GCR2); reg |= 1; ew32(GCR2, reg); break; default: break; } } /** * e1000_clear_vfta_82571 - Clear VLAN filter table * @hw: pointer to the HW structure * * Clears the register array which contains the VLAN filter table by * setting all the values to 0. **/ static void e1000_clear_vfta_82571(struct e1000_hw *hw) { u32 offset; u32 vfta_value = 0; u32 vfta_offset = 0; u32 vfta_bit_in_reg = 0; switch (hw->mac.type) { case e1000_82573: case e1000_82574: case e1000_82583: if (hw->mng_cookie.vlan_id != 0) { /* The VFTA is a 4096b bit-field, each identifying * a single VLAN ID. The following operations * determine which 32b entry (i.e. offset) into the * array we want to set the VLAN ID (i.e. bit) of * the manageability unit. */ vfta_offset = (hw->mng_cookie.vlan_id >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; vfta_bit_in_reg = BIT(hw->mng_cookie.vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); } break; default: break; } for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { /* If the offset we want to clear is the same offset of the * manageability VLAN ID, then clear all bits except that of * the manageability unit. */ vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value); e1e_flush(); } } /** * e1000_check_mng_mode_82574 - Check manageability is enabled * @hw: pointer to the HW structure * * Reads the NVM Initialization Control Word 2 and returns true * (>0) if any manageability is enabled, else false (0). **/ static bool e1000_check_mng_mode_82574(struct e1000_hw *hw) { u16 data; e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0; } /** * e1000_led_on_82574 - Turn LED on * @hw: pointer to the HW structure * * Turn LED on. **/ static s32 e1000_led_on_82574(struct e1000_hw *hw) { u32 ctrl; u32 i; ctrl = hw->mac.ledctl_mode2; if (!(E1000_STATUS_LU & er32(STATUS))) { /* If no link, then turn LED on by setting the invert bit * for each LED that's "on" (0x0E) in ledctl_mode2. */ for (i = 0; i < 4; i++) if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == E1000_LEDCTL_MODE_LED_ON) ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8)); } ew32(LEDCTL, ctrl); return 0; } /** * e1000_check_phy_82574 - check 82574 phy hung state * @hw: pointer to the HW structure * * Returns whether phy is hung or not **/ bool e1000_check_phy_82574(struct e1000_hw *hw) { u16 status_1kbt = 0; u16 receive_errors = 0; s32 ret_val; /* Read the PHY Receive Error counter first; if it is at its max (all F's), * then read the Base1000T status register. If both are at their max, the * PHY is hung. */ ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors); if (ret_val) return false; if (receive_errors == E1000_RECEIVE_ERROR_MAX) { ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt); if (ret_val) return false; if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) == E1000_IDLE_ERROR_COUNT_MASK) return true; } return false; } /** * e1000_setup_link_82571 - Setup flow control and link settings * @hw: pointer to the HW structure * * Determines which flow control settings to use, then configures flow * control. Calls the appropriate media-specific link configuration * function. Assuming the adapter has a valid link partner, a valid link * should be established.
Assumes the hardware has previously been reset * and the transmitter and receiver are not enabled. **/ static s32 e1000_setup_link_82571(struct e1000_hw *hw) { /* 82573 does not have a word in the NVM to determine * the default flow control setting, so we explicitly * set it to full. */ switch (hw->mac.type) { case e1000_82573: case e1000_82574: case e1000_82583: if (hw->fc.requested_mode == e1000_fc_default) hw->fc.requested_mode = e1000_fc_full; break; default: break; } return e1000e_setup_link_generic(hw); } /** * e1000_setup_copper_link_82571 - Configure copper link settings * @hw: pointer to the HW structure * * Configures the link for auto-neg or forced speed and duplex. Then we check * for link, once link is established calls to configure collision distance * and flow control are called. **/ static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; ctrl = er32(CTRL); ctrl |= E1000_CTRL_SLU; ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); ew32(CTRL, ctrl); switch (hw->phy.type) { case e1000_phy_m88: case e1000_phy_bm: ret_val = e1000e_copper_link_setup_m88(hw); break; case e1000_phy_igp_2: ret_val = e1000e_copper_link_setup_igp(hw); break; default: return -E1000_ERR_PHY; } if (ret_val) return ret_val; return e1000e_setup_copper_link(hw); } /** * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes * @hw: pointer to the HW structure * * Configures collision distance and flow control for fiber and serdes links. * Upon successful setup, poll for link. **/ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) { switch (hw->mac.type) { case e1000_82571: case e1000_82572: /* If SerDes loopback mode is entered, there is no form * of reset to take the adapter out of that mode. So we * have to explicitly take the adapter out of loopback * mode. This prevents drivers from twiddling their thumbs * if another tool failed to take it out of loopback mode. */ ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); break; default: break; } return e1000e_setup_fiber_serdes_link(hw); } /** * e1000_check_for_serdes_link_82571 - Check for link (Serdes) * @hw: pointer to the HW structure * * Reports the link state as up or down. * * If autonegotiation is supported by the link partner, the link state is * determined by the result of autonegotiation. This is the most likely case. * If autonegotiation is not supported by the link partner, and the link * has a valid signal, force the link up. * * The link state is represented internally here by 4 states: * * 1) down * 2) autoneg_progress * 3) autoneg_complete (the link successfully autonegotiated) * 4) forced_up (the link has been forced up, it did not autonegotiate) * **/ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 rxcw; u32 ctrl; u32 status; u32 txcw; u32 i; s32 ret_val = 0; ctrl = er32(CTRL); status = er32(STATUS); er32(RXCW); /* SYNCH bit and IV bit are sticky */ usleep_range(10, 20); rxcw = er32(RXCW); if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { /* Receiver is synchronized with no invalid bits. 
*/ switch (mac->serdes_link_state) { case e1000_serdes_link_autoneg_complete: if (!(status & E1000_STATUS_LU)) { /* We have lost link, retry autoneg before * reporting link failure */ mac->serdes_link_state = e1000_serdes_link_autoneg_progress; mac->serdes_has_link = false; e_dbg("AN_UP -> AN_PROG\n"); } else { mac->serdes_has_link = true; } break; case e1000_serdes_link_forced_up: /* If we are receiving /C/ ordered sets, re-enable * auto-negotiation in the TXCW register and disable * forced link in the Device Control register in an * attempt to auto-negotiate with our link partner. */ if (rxcw & E1000_RXCW_C) { /* Enable autoneg, and unforce link up */ ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); mac->serdes_link_state = e1000_serdes_link_autoneg_progress; mac->serdes_has_link = false; e_dbg("FORCED_UP -> AN_PROG\n"); } else { mac->serdes_has_link = true; } break; case e1000_serdes_link_autoneg_progress: if (rxcw & E1000_RXCW_C) { /* We received /C/ ordered sets, meaning the * link partner has autonegotiated, and we can * trust the Link Up (LU) status bit. */ if (status & E1000_STATUS_LU) { mac->serdes_link_state = e1000_serdes_link_autoneg_complete; e_dbg("AN_PROG -> AN_UP\n"); mac->serdes_has_link = true; } else { /* Autoneg completed, but failed. */ mac->serdes_link_state = e1000_serdes_link_down; e_dbg("AN_PROG -> DOWN\n"); } } else { /* The link partner did not autoneg. * Force link up and full duplex, and change * state to forced. */ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); ew32(CTRL, ctrl); /* Configure Flow Control after link up. */ ret_val = e1000e_config_fc_after_link_up(hw); if (ret_val) { e_dbg("Error config flow control\n"); break; } mac->serdes_link_state = e1000_serdes_link_forced_up; mac->serdes_has_link = true; e_dbg("AN_PROG -> FORCED_UP\n"); } break; case e1000_serdes_link_down: default: /* The link was down but the receiver has now gained * valid sync, so lets see if we can bring the link * up. */ ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); mac->serdes_link_state = e1000_serdes_link_autoneg_progress; mac->serdes_has_link = false; e_dbg("DOWN -> AN_PROG\n"); break; } } else { if (!(rxcw & E1000_RXCW_SYNCH)) { mac->serdes_has_link = false; mac->serdes_link_state = e1000_serdes_link_down; e_dbg("ANYSTATE -> DOWN\n"); } else { /* Check several times, if SYNCH bit and CONFIG * bit both are consistently 1 then simply ignore * the IV bit and restart Autoneg */ for (i = 0; i < AN_RETRY_COUNT; i++) { usleep_range(10, 20); rxcw = er32(RXCW); if ((rxcw & E1000_RXCW_SYNCH) && (rxcw & E1000_RXCW_C)) continue; if (rxcw & E1000_RXCW_IV) { mac->serdes_has_link = false; mac->serdes_link_state = e1000_serdes_link_down; e_dbg("ANYSTATE -> DOWN\n"); break; } } if (i == AN_RETRY_COUNT) { txcw = er32(TXCW); txcw |= E1000_TXCW_ANE; ew32(TXCW, txcw); mac->serdes_link_state = e1000_serdes_link_autoneg_progress; mac->serdes_has_link = false; e_dbg("ANYSTATE -> AN_PROG\n"); } } } return ret_val; } /** * e1000_valid_led_default_82571 - Verify a valid default LED config * @hw: pointer to the HW structure * @data: pointer to the NVM (EEPROM) * * Read the EEPROM for the current default LED configuration. If the * LED configuration is not valid, set to a valid LED configuration. 
**/ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data) { s32 ret_val; ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); if (ret_val) { e_dbg("NVM Read Error\n"); return ret_val; } switch (hw->mac.type) { case e1000_82573: case e1000_82574: case e1000_82583: if (*data == ID_LED_RESERVED_F746) *data = ID_LED_DEFAULT_82573; break; default: if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) *data = ID_LED_DEFAULT; break; } return 0; } /** * e1000e_get_laa_state_82571 - Get locally administered address state * @hw: pointer to the HW structure * * Retrieve and return the current locally administered address state. **/ bool e1000e_get_laa_state_82571(struct e1000_hw *hw) { if (hw->mac.type != e1000_82571) return false; return hw->dev_spec.e82571.laa_is_present; } /** * e1000e_set_laa_state_82571 - Set locally administered address state * @hw: pointer to the HW structure * @state: enable/disable locally administered address * * Enable/Disable the current locally administered address state. **/ void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state) { if (hw->mac.type != e1000_82571) return; hw->dev_spec.e82571.laa_is_present = state; /* If workaround is activated... */ if (state) /* Hold a copy of the LAA in RAR[14]. This is done so that * between the time RAR[0] gets clobbered and the time it * gets fixed, the actual LAA is in one of the RARs and no * incoming packets directed to this port are dropped. * Eventually the LAA will be in RAR[0] and RAR[14]. */ hw->mac.ops.rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1); } /** * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum * @hw: pointer to the HW structure * * Verifies that the EEPROM has completed the update. After updating the * EEPROM, we need to check bit 15 in word 0x23 for the checksum fix. If * the checksum fix is not implemented, we need to set the bit and update * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect, * we need to return bad checksum. **/ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; s32 ret_val; u16 data; if (nvm->type != e1000_nvm_flash_hw) return 0; /* Check bit 4 of word 10h. If it is 0, firmware is done updating * 10h-12h. Checksum may need to be fixed. */ ret_val = e1000_read_nvm(hw, 0x10, 1, &data); if (ret_val) return ret_val; if (!(data & 0x10)) { /* Read 0x23 and check bit 15. This bit is a 1 * when the checksum has already been fixed. If * the checksum is still wrong and this bit is a * 1, we need to return bad checksum. Otherwise, * we need to set this bit to a 1 and update the * checksum. */ ret_val = e1000_read_nvm(hw, 0x23, 1, &data); if (ret_val) return ret_val; if (!(data & 0x8000)) { data |= 0x8000; ret_val = e1000_write_nvm(hw, 0x23, 1, &data); if (ret_val) return ret_val; ret_val = e1000e_update_nvm_checksum(hw); if (ret_val) return ret_val; } } return 0; } /** * e1000_read_mac_addr_82571 - Read device MAC address * @hw: pointer to the HW structure **/ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) { if (hw->mac.type == e1000_82571) { s32 ret_val; /* If there's an alternate MAC address, place it in RAR0 * so that it will override the Si installed default perm * address.
*/ ret_val = e1000_check_alt_mac_addr_generic(hw); if (ret_val) return ret_val; } return e1000_read_mac_addr_generic(hw); } /** * e1000_power_down_phy_copper_82571 - Remove link during PHY power down * @hw: pointer to the HW structure * * In the case of a PHY power down to save power, or to turn off link during a * driver unload, or wake on lan is not enabled, remove the link. **/ static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; struct e1000_mac_info *mac = &hw->mac; if (!phy->ops.check_reset_block) return; /* If the management interface is not enabled, then power down */ if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw))) e1000_power_down_phy_copper(hw); } /** * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters * @hw: pointer to the HW structure * * Clears the hardware counters by reading the counter registers. **/ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) { e1000e_clear_hw_cntrs_base(hw); er32(PRC64); er32(PRC127); er32(PRC255); er32(PRC511); er32(PRC1023); er32(PRC1522); er32(PTC64); er32(PTC127); er32(PTC255); er32(PTC511); er32(PTC1023); er32(PTC1522); er32(ALGNERRC); er32(RXERRC); er32(TNCRS); er32(CEXTERR); er32(TSCTC); er32(TSCTFC); er32(MGTPRC); er32(MGTPDC); er32(MGTPTC); er32(IAC); er32(ICRXOC); er32(ICRXPTC); er32(ICRXATC); er32(ICTXPTC); er32(ICTXATC); er32(ICTXQEC); er32(ICTXQMTC); er32(ICRXDMTC); } static const struct e1000_mac_operations e82571_mac_ops = { /* .check_mng_mode: mac type dependent */ /* .check_for_link: media type dependent */ .id_led_init = e1000e_id_led_init_generic, .cleanup_led = e1000e_cleanup_led_generic, .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, .get_bus_info = e1000e_get_bus_info_pcie, .set_lan_id = e1000_set_lan_id_multi_port_pcie, /* .get_link_up_info: media type dependent */ /* .led_on: mac type dependent */ .led_off = e1000e_led_off_generic, .update_mc_addr_list = e1000e_update_mc_addr_list_generic, .write_vfta = e1000_write_vfta_generic, .clear_vfta = e1000_clear_vfta_82571, .reset_hw = e1000_reset_hw_82571, .init_hw = e1000_init_hw_82571, .setup_link = e1000_setup_link_82571, /* .setup_physical_interface: media type dependent */ .setup_led = e1000e_setup_led_generic, .config_collision_dist = e1000e_config_collision_dist_generic, .read_mac_addr = e1000_read_mac_addr_82571, .rar_set = e1000e_rar_set_generic, .rar_get_count = e1000e_rar_get_count_generic, }; static const struct e1000_phy_operations e82_phy_ops_igp = { .acquire = e1000_get_hw_semaphore_82571, .check_polarity = e1000_check_polarity_igp, .check_reset_block = e1000e_check_reset_block_generic, .commit = NULL, .force_speed_duplex = e1000e_phy_force_speed_duplex_igp, .get_cfg_done = e1000_get_cfg_done_82571, .get_cable_length = e1000e_get_cable_length_igp_2, .get_info = e1000e_get_phy_info_igp, .read_reg = e1000e_read_phy_reg_igp, .release = e1000_put_hw_semaphore_82571, .reset = e1000e_phy_hw_reset_generic, .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, .set_d3_lplu_state = e1000e_set_d3_lplu_state, .write_reg = e1000e_write_phy_reg_igp, .cfg_on_link_up = NULL, }; static const struct e1000_phy_operations e82_phy_ops_m88 = { .acquire = e1000_get_hw_semaphore_82571, .check_polarity = e1000_check_polarity_m88, .check_reset_block = e1000e_check_reset_block_generic, .commit = e1000e_phy_sw_reset, .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, .get_cfg_done = e1000e_get_cfg_done_generic, .get_cable_length = e1000e_get_cable_length_m88, .get_info = 
e1000e_get_phy_info_m88, .read_reg = e1000e_read_phy_reg_m88, .release = e1000_put_hw_semaphore_82571, .reset = e1000e_phy_hw_reset_generic, .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, .set_d3_lplu_state = e1000e_set_d3_lplu_state, .write_reg = e1000e_write_phy_reg_m88, .cfg_on_link_up = NULL, }; static const struct e1000_phy_operations e82_phy_ops_bm = { .acquire = e1000_get_hw_semaphore_82571, .check_polarity = e1000_check_polarity_m88, .check_reset_block = e1000e_check_reset_block_generic, .commit = e1000e_phy_sw_reset, .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, .get_cfg_done = e1000e_get_cfg_done_generic, .get_cable_length = e1000e_get_cable_length_m88, .get_info = e1000e_get_phy_info_m88, .read_reg = e1000e_read_phy_reg_bm2, .release = e1000_put_hw_semaphore_82571, .reset = e1000e_phy_hw_reset_generic, .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, .set_d3_lplu_state = e1000e_set_d3_lplu_state, .write_reg = e1000e_write_phy_reg_bm2, .cfg_on_link_up = NULL, }; static const struct e1000_nvm_operations e82571_nvm_ops = { .acquire = e1000_acquire_nvm_82571, .read = e1000e_read_nvm_eerd, .release = e1000_release_nvm_82571, .reload = e1000e_reload_nvm_generic, .update = e1000_update_nvm_checksum_82571, .valid_led_default = e1000_valid_led_default_82571, .validate = e1000_validate_nvm_checksum_82571, .write = e1000_write_nvm_82571, }; const struct e1000_info e1000_82571_info = { .mac = e1000_82571, .flags = FLAG_HAS_HW_VLAN_FILTER | FLAG_HAS_JUMBO_FRAMES | FLAG_HAS_WOL | FLAG_APME_IN_CTRL3 | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_SMART_POWER_DOWN | FLAG_RESET_OVERWRITES_LAA /* errata */ | FLAG_TARC_SPEED_MODE_BIT /* errata */ | FLAG_APME_CHECK_PORT_B, .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ | FLAG2_DMA_BURST, .pba = 38, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_82571, .mac_ops = &e82571_mac_ops, .phy_ops = &e82_phy_ops_igp, .nvm_ops = &e82571_nvm_ops, }; const struct e1000_info e1000_82572_info = { .mac = e1000_82572, .flags = FLAG_HAS_HW_VLAN_FILTER | FLAG_HAS_JUMBO_FRAMES | FLAG_HAS_WOL | FLAG_APME_IN_CTRL3 | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_TARC_SPEED_MODE_BIT, /* errata */ .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ | FLAG2_DMA_BURST, .pba = 38, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_82571, .mac_ops = &e82571_mac_ops, .phy_ops = &e82_phy_ops_igp, .nvm_ops = &e82571_nvm_ops, }; const struct e1000_info e1000_82573_info = { .mac = e1000_82573, .flags = FLAG_HAS_HW_VLAN_FILTER | FLAG_HAS_WOL | FLAG_APME_IN_CTRL3 | FLAG_HAS_SMART_POWER_DOWN | FLAG_HAS_AMT | FLAG_HAS_SWSM_ON_LOAD, .flags2 = FLAG2_DISABLE_ASPM_L1 | FLAG2_DISABLE_ASPM_L0S, .pba = 20, .max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN, .get_variants = e1000_get_variants_82571, .mac_ops = &e82571_mac_ops, .phy_ops = &e82_phy_ops_m88, .nvm_ops = &e82571_nvm_ops, }; const struct e1000_info e1000_82574_info = { .mac = e1000_82574, .flags = FLAG_HAS_HW_VLAN_FILTER | FLAG_HAS_MSIX | FLAG_HAS_JUMBO_FRAMES | FLAG_HAS_WOL | FLAG_HAS_HW_TIMESTAMP | FLAG_APME_IN_CTRL3 | FLAG_HAS_SMART_POWER_DOWN | FLAG_HAS_AMT | FLAG_HAS_CTRLEXT_ON_LOAD, .flags2 = FLAG2_CHECK_PHY_HANG | FLAG2_DISABLE_ASPM_L0S | FLAG2_DISABLE_ASPM_L1 | FLAG2_NO_DISABLE_RX | FLAG2_DMA_BURST | FLAG2_CHECK_SYSTIM_OVERFLOW, .pba = 32, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_82571, .mac_ops = &e82571_mac_ops, .phy_ops = &e82_phy_ops_bm, .nvm_ops = &e82571_nvm_ops, }; const struct e1000_info e1000_82583_info = { .mac = e1000_82583, .flags = 
FLAG_HAS_HW_VLAN_FILTER | FLAG_HAS_WOL | FLAG_HAS_HW_TIMESTAMP | FLAG_APME_IN_CTRL3 | FLAG_HAS_SMART_POWER_DOWN | FLAG_HAS_AMT | FLAG_HAS_JUMBO_FRAMES | FLAG_HAS_CTRLEXT_ON_LOAD, .flags2 = FLAG2_DISABLE_ASPM_L0S | FLAG2_DISABLE_ASPM_L1 | FLAG2_NO_DISABLE_RX | FLAG2_CHECK_SYSTIM_OVERFLOW, .pba = 32, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_82571, .mac_ops = &e82571_mac_ops, .phy_ops = &e82_phy_ops_bm, .nvm_ops = &e82571_nvm_ops, };
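/*
 * Illustrative sketch only, not part of the driver above. The kernel-doc
 * for e1000_validate_nvm_checksum_82571() states that the EEPROM words are
 * summed and the total is expected to equal 0xBABA. The standalone helper
 * below redoes that arithmetic on a plain array of 16-bit words. The name
 * demo_nvm_checksum_ok() and passing the region size as a parameter are
 * hypothetical; only the 0xBABA target comes from the comment above, and
 * the exact extent of the checksummed region is an assumption.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_NVM_SUM 0xBABA	/* expected 16-bit sum, per the kernel-doc */

bool demo_nvm_checksum_ok(const uint16_t *words, size_t nwords)
{
	uint16_t sum = 0;
	size_t i;

	/* 16-bit wrap-around addition over the checksummed region,
	 * assumed to include the stored checksum word itself.
	 */
	for (i = 0; i < nwords; i++)
		sum += words[i];

	return sum == DEMO_NVM_SUM;
}
/*
 * Design note: storing a checksum word chosen so that the full sum lands on
 * the fixed constant is why e1000_update_nvm_checksum_82571() recomputes and
 * rewrites that word after any EEPROM change.
 */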
linux-master
drivers/net/ethernet/intel/e1000e/82571.c
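/*
 * Illustrative sketch only, related to e1000_clear_vfta_82571() in the
 * 82571.c listing above: its comment describes the VLAN filter table as a
 * 4096-bit field and derives which 32-bit register (offset) and which bit
 * within it correspond to a given VLAN ID. The helper below redoes that
 * index math on plain integers. demo_vfta_locate() and the DEMO_* masks
 * are hypothetical stand-ins for the driver's E1000_VFTA_* constants,
 * assuming 128 registers of 32 bits each (128 * 32 = 4096 VLAN IDs).
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_VFTA_ENTRY_SHIFT	5	/* vlan_id / 32 selects the register */
#define DEMO_VFTA_ENTRY_MASK	0x7F	/* 128 registers -> 7 index bits */
#define DEMO_VFTA_BIT_MASK	0x1F	/* vlan_id % 32 selects the bit */

static void demo_vfta_locate(uint16_t vlan_id, uint32_t *reg_index,
			     uint32_t *bit_value)
{
	*reg_index = (vlan_id >> DEMO_VFTA_ENTRY_SHIFT) & DEMO_VFTA_ENTRY_MASK;
	*bit_value = (uint32_t)1 << (vlan_id & DEMO_VFTA_BIT_MASK);
}

int main(void)
{
	uint32_t idx, bit;

	/* VLAN 100 -> register index 3, bit 4 (mask 0x00000010) */
	demo_vfta_locate(100, &idx, &bit);
	printf("reg %u, bit mask 0x%08x\n", (unsigned int)idx,
	       (unsigned int)bit);
	return 0;
}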
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 1999 - 2018 Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/interrupt.h> #include <linux/tcp.h> #include <linux/ipv6.h> #include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/cpu.h> #include <linux/smp.h> #include <linux/pm_qos.h> #include <linux/pm_runtime.h> #include <linux/prefetch.h> #include <linux/suspend.h> #include "e1000.h" #define CREATE_TRACE_POINTS #include "e1000e_trace.h" char e1000e_driver_name[] = "e1000e"; #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); static const struct e1000_info *e1000_info_tbl[] = { [board_82571] = &e1000_82571_info, [board_82572] = &e1000_82572_info, [board_82573] = &e1000_82573_info, [board_82574] = &e1000_82574_info, [board_82583] = &e1000_82583_info, [board_80003es2lan] = &e1000_es2_info, [board_ich8lan] = &e1000_ich8_info, [board_ich9lan] = &e1000_ich9_info, [board_ich10lan] = &e1000_ich10_info, [board_pchlan] = &e1000_pch_info, [board_pch2lan] = &e1000_pch2_info, [board_pch_lpt] = &e1000_pch_lpt_info, [board_pch_spt] = &e1000_pch_spt_info, [board_pch_cnp] = &e1000_pch_cnp_info, [board_pch_tgp] = &e1000_pch_tgp_info, [board_pch_adp] = &e1000_pch_adp_info, [board_pch_mtp] = &e1000_pch_mtp_info, }; struct e1000_reg_info { u32 ofs; char *name; }; static const struct e1000_reg_info e1000_reg_info_tbl[] = { /* General Registers */ {E1000_CTRL, "CTRL"}, {E1000_STATUS, "STATUS"}, {E1000_CTRL_EXT, "CTRL_EXT"}, /* Interrupt Registers */ {E1000_ICR, "ICR"}, /* Rx Registers */ {E1000_RCTL, "RCTL"}, {E1000_RDLEN(0), "RDLEN"}, {E1000_RDH(0), "RDH"}, {E1000_RDT(0), "RDT"}, {E1000_RDTR, "RDTR"}, {E1000_RXDCTL(0), "RXDCTL"}, {E1000_ERT, "ERT"}, {E1000_RDBAL(0), "RDBAL"}, {E1000_RDBAH(0), "RDBAH"}, {E1000_RDFH, "RDFH"}, {E1000_RDFT, "RDFT"}, {E1000_RDFHS, "RDFHS"}, {E1000_RDFTS, "RDFTS"}, {E1000_RDFPC, "RDFPC"}, /* Tx Registers */ {E1000_TCTL, "TCTL"}, {E1000_TDBAL(0), "TDBAL"}, {E1000_TDBAH(0), "TDBAH"}, {E1000_TDLEN(0), "TDLEN"}, {E1000_TDH(0), "TDH"}, {E1000_TDT(0), "TDT"}, {E1000_TIDV, "TIDV"}, {E1000_TXDCTL(0), "TXDCTL"}, {E1000_TADV, "TADV"}, {E1000_TARC(0), "TARC"}, {E1000_TDFH, "TDFH"}, {E1000_TDFT, "TDFT"}, {E1000_TDFHS, "TDFHS"}, {E1000_TDFTS, "TDFTS"}, {E1000_TDFPC, "TDFPC"}, /* List Terminator */ {0, NULL} }; /** * __ew32_prepare - prepare to write to MAC CSR register on certain parts * @hw: pointer to the HW structure * * When updating the MAC CSR registers, the Manageability Engine (ME) could * be accessing the registers at the same time. Normally, this is handled in * h/w by an arbiter but on some parts there is a bug that acknowledges Host * accesses later than it should which could result in the register to have * an incorrect value. Workaround this by checking the FWSM register which * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set * and try again a number of times. 
**/ static void __ew32_prepare(struct e1000_hw *hw) { s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) udelay(50); } void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) { if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) __ew32_prepare(hw); writel(val, hw->hw_addr + reg); } /** * e1000_regdump - register printout routine * @hw: pointer to the HW structure * @reginfo: pointer to the register info table **/ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) { int n = 0; char rname[16]; u32 regs[8]; switch (reginfo->ofs) { case E1000_RXDCTL(0): for (n = 0; n < 2; n++) regs[n] = __er32(hw, E1000_RXDCTL(n)); break; case E1000_TXDCTL(0): for (n = 0; n < 2; n++) regs[n] = __er32(hw, E1000_TXDCTL(n)); break; case E1000_TARC(0): for (n = 0; n < 2; n++) regs[n] = __er32(hw, E1000_TARC(n)); break; default: pr_info("%-15s %08x\n", reginfo->name, __er32(hw, reginfo->ofs)); return; } snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); } static void e1000e_dump_ps_pages(struct e1000_adapter *adapter, struct e1000_buffer *bi) { int i; struct e1000_ps_page *ps_page; for (i = 0; i < adapter->rx_ps_pages; i++) { ps_page = &bi->ps_pages[i]; if (ps_page->page) { pr_info("packet dump for ps_page %d:\n", i); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, page_address(ps_page->page), PAGE_SIZE, true); } } } /** * e1000e_dump - Print registers, Tx-ring and Rx-ring * @adapter: board private structure **/ static void e1000e_dump(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; struct e1000_reg_info *reginfo; struct e1000_ring *tx_ring = adapter->tx_ring; struct e1000_tx_desc *tx_desc; struct my_u0 { __le64 a; __le64 b; } *u0; struct e1000_buffer *buffer_info; struct e1000_ring *rx_ring = adapter->rx_ring; union e1000_rx_desc_packet_split *rx_desc_ps; union e1000_rx_desc_extended *rx_desc; struct my_u1 { __le64 a; __le64 b; __le64 c; __le64 d; } *u1; u32 staterr; int i = 0; if (!netif_msg_hw(adapter)) return; /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); pr_info("Device Name state trans_start\n"); pr_info("%-15s %016lX %016lX\n", netdev->name, netdev->state, dev_trans_start(netdev)); } /* Print Registers */ dev_info(&adapter->pdev->dev, "Register Dump\n"); pr_info(" Register Name Value\n"); for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl; reginfo->name; reginfo++) { e1000_regdump(hw, reginfo); } /* Print Tx Ring Summary */ if (!netdev || !netif_running(netdev)) return; dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", 0, tx_ring->next_to_use, tx_ring->next_to_clean, (unsigned long long)buffer_info->dma, buffer_info->length, buffer_info->next_to_watch, (unsigned long long)buffer_info->time_stamp); /* Print Tx Ring */ if (!netif_msg_tx_done(adapter)) goto rx_ring_summary; dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) * * Legacy Transmit Descriptor * +--------------------------------------------------------------+ * 0 | Buffer Address [63:0] (Reserved on Write Back) | * +--------------------------------------------------------------+ * 8 | Special | CSS | Status | CMD | CSO | Length | * 
+--------------------------------------------------------------+ * 63 48 47 36 35 32 31 24 23 16 15 0 * * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload * 63 48 47 40 39 32 31 16 15 8 7 0 * +----------------------------------------------------------------+ * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | * +----------------------------------------------------------------+ * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | * +----------------------------------------------------------------+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 * * Extended Data Descriptor (DTYP=0x1) * +----------------------------------------------------------------+ * 0 | Buffer Address [63:0] | * +----------------------------------------------------------------+ * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | * +----------------------------------------------------------------+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 */ pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n"); pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n"); pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n"); for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { const char *next_desc; tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; u0 = (struct my_u0 *)tx_desc; if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) next_desc = " NTC/U"; else if (i == tx_ring->next_to_use) next_desc = " NTU"; else if (i == tx_ring->next_to_clean) next_desc = " NTC"; else next_desc = ""; pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n", (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' : ((le64_to_cpu(u0->b) & BIT(20)) ? 
'd' : 'c')), i, (unsigned long long)le64_to_cpu(u0->a), (unsigned long long)le64_to_cpu(u0->b), (unsigned long long)buffer_info->dma, buffer_info->length, buffer_info->next_to_watch, (unsigned long long)buffer_info->time_stamp, buffer_info->skb, next_desc); if (netif_msg_pktdata(adapter) && buffer_info->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, buffer_info->skb->data, buffer_info->skb->len, true); } /* Print Rx Ring Summary */ rx_ring_summary: dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); pr_info("Queue [NTU] [NTC]\n"); pr_info(" %5d %5X %5X\n", 0, rx_ring->next_to_use, rx_ring->next_to_clean); /* Print Rx Ring */ if (!netif_msg_rx_status(adapter)) return; dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); switch (adapter->rx_ps_pages) { case 1: case 2: case 3: /* [Extended] Packet Split Receive Descriptor Format * * +-----------------------------------------------------+ * 0 | Buffer Address 0 [63:0] | * +-----------------------------------------------------+ * 8 | Buffer Address 1 [63:0] | * +-----------------------------------------------------+ * 16 | Buffer Address 2 [63:0] | * +-----------------------------------------------------+ * 24 | Buffer Address 3 [63:0] | * +-----------------------------------------------------+ */ pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n"); /* [Extended] Receive Descriptor (Write-Back) Format * * 63 48 47 32 31 13 12 8 7 4 3 0 * +------------------------------------------------------+ * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS | * | Checksum | Ident | | Queue | | Type | * +------------------------------------------------------+ * 8 | VLAN Tag | Length | Extended Error | Extended Status | * +------------------------------------------------------+ * 63 48 47 32 31 20 19 0 */ pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n"); for (i = 0; i < rx_ring->count; i++) { const char *next_desc; buffer_info = &rx_ring->buffer_info[i]; rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); u1 = (struct my_u1 *)rx_desc_ps; staterr = le32_to_cpu(rx_desc_ps->wb.middle.status_error); if (i == rx_ring->next_to_use) next_desc = " NTU"; else if (i == rx_ring->next_to_clean) next_desc = " NTC"; else next_desc = ""; if (staterr & E1000_RXD_STAT_DD) { /* Descriptor Done */ pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n", "RWB", i, (unsigned long long)le64_to_cpu(u1->a), (unsigned long long)le64_to_cpu(u1->b), (unsigned long long)le64_to_cpu(u1->c), (unsigned long long)le64_to_cpu(u1->d), buffer_info->skb, next_desc); } else { pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n", "R ", i, (unsigned long long)le64_to_cpu(u1->a), (unsigned long long)le64_to_cpu(u1->b), (unsigned long long)le64_to_cpu(u1->c), (unsigned long long)le64_to_cpu(u1->d), (unsigned long long)buffer_info->dma, buffer_info->skb, next_desc); if (netif_msg_pktdata(adapter)) e1000e_dump_ps_pages(adapter, buffer_info); } } break; default: case 0: /* Extended Receive Descriptor (Read) Format * * +-----------------------------------------------------+ * 0 | Buffer Address [63:0] | * +-----------------------------------------------------+ * 8 | Reserved | * +-----------------------------------------------------+ */ pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n"); /* Extended Receive Descriptor (Write-Back) Format * * 63 48 47 
32 31 24 23 4 3 0 * +------------------------------------------------------+ * | RSS Hash | | | | * 0 +-------------------+ Rsvd | Reserved | MRQ RSS | * | Packet | IP | | | Type | * | Checksum | Ident | | | | * +------------------------------------------------------+ * 8 | VLAN Tag | Length | Extended Error | Extended Status | * +------------------------------------------------------+ * 63 48 47 32 31 20 19 0 */ pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n"); for (i = 0; i < rx_ring->count; i++) { const char *next_desc; buffer_info = &rx_ring->buffer_info[i]; rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); u1 = (struct my_u1 *)rx_desc; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); if (i == rx_ring->next_to_use) next_desc = " NTU"; else if (i == rx_ring->next_to_clean) next_desc = " NTC"; else next_desc = ""; if (staterr & E1000_RXD_STAT_DD) { /* Descriptor Done */ pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n", "RWB", i, (unsigned long long)le64_to_cpu(u1->a), (unsigned long long)le64_to_cpu(u1->b), buffer_info->skb, next_desc); } else { pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n", "R ", i, (unsigned long long)le64_to_cpu(u1->a), (unsigned long long)le64_to_cpu(u1->b), (unsigned long long)buffer_info->dma, buffer_info->skb, next_desc); if (netif_msg_pktdata(adapter) && buffer_info->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, buffer_info->skb->data, adapter->rx_buffer_len, true); } } } } /** * e1000_desc_unused - calculate if we have unused descriptors * @ring: pointer to ring struct to perform calculation on **/ static int e1000_desc_unused(struct e1000_ring *ring) { if (ring->next_to_clean > ring->next_to_use) return ring->next_to_clean - ring->next_to_use - 1; return ring->count + ring->next_to_clean - ring->next_to_use - 1; } /** * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp * @adapter: board private structure * @hwtstamps: time stamp structure to update * @systim: unsigned 64bit system time value. * * Convert the system time value stored in the RX/TXSTMP registers into a * hwtstamp which can be used by the upper level time stamping functions. * * The 'systim_lock' spinlock is used to protect the consistency of the * system time value. This is needed because reading the 64 bit time * value involves reading two 32 bit registers. The first read latches the * value. **/ static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter, struct skb_shared_hwtstamps *hwtstamps, u64 systim) { u64 ns; unsigned long flags; spin_lock_irqsave(&adapter->systim_lock, flags); ns = timecounter_cyc2time(&adapter->tc, systim); spin_unlock_irqrestore(&adapter->systim_lock, flags); memset(hwtstamps, 0, sizeof(*hwtstamps)); hwtstamps->hwtstamp = ns_to_ktime(ns); } /** * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp * @adapter: board private structure * @status: descriptor extended error and status field * @skb: particular skb to include time stamp * * If the time stamp is valid, convert it into the timecounter ns value * and store that result into the shhwtstamps structure which is passed * up the network stack. **/ static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status, struct sk_buff *skb) { struct e1000_hw *hw = &adapter->hw; u64 rxstmp; if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) || !(status & E1000_RXDEXT_STATERR_TST) || !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) return; /* The Rx time stamp registers contain the time stamp. 
No other * received packet will be time stamped until the Rx time stamp * registers are read. Because only one packet can be time stamped * at a time, the register values must belong to this packet and * therefore none of the other additional attributes need to be * compared. */ rxstmp = (u64)er32(RXSTMPL); rxstmp |= (u64)er32(RXSTMPH) << 32; e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp); adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP; } /** * e1000_receive_skb - helper function to handle Rx indications * @adapter: board private structure * @netdev: pointer to netdev struct * @staterr: descriptor extended error and status field as written by hardware * @vlan: descriptor vlan field as written by hardware (no le/be conversion) * @skb: pointer to sk_buff to be indicated to stack **/ static void e1000_receive_skb(struct e1000_adapter *adapter, struct net_device *netdev, struct sk_buff *skb, u32 staterr, __le16 vlan) { u16 tag = le16_to_cpu(vlan); e1000e_rx_hwtstamp(adapter, staterr, skb); skb->protocol = eth_type_trans(skb, netdev); if (staterr & E1000_RXD_STAT_VP) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); napi_gro_receive(&adapter->napi, skb); } /** * e1000_rx_checksum - Receive Checksum Offload * @adapter: board private structure * @status_err: receive descriptor status and error fields * @skb: socket buffer with received data **/ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, struct sk_buff *skb) { u16 status = (u16)status_err; u8 errors = (u8)(status_err >> 24); skb_checksum_none_assert(skb); /* Rx checksum disabled */ if (!(adapter->netdev->features & NETIF_F_RXCSUM)) return; /* Ignore Checksum bit is set */ if (status & E1000_RXD_STAT_IXSM) return; /* TCP/UDP checksum error bit or IP checksum error bit is set */ if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) { /* let the stack verify checksum errors */ adapter->hw_csum_err++; return; } /* TCP/UDP Checksum has not been calculated */ if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) return; /* It must be a TCP or UDP packet with a valid checksum */ skb->ip_summed = CHECKSUM_UNNECESSARY; adapter->hw_csum_good++; } static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) { struct e1000_adapter *adapter = rx_ring->adapter; struct e1000_hw *hw = &adapter->hw; __ew32_prepare(hw); writel(i, rx_ring->tail); if (unlikely(i != readl(rx_ring->tail))) { u32 rctl = er32(RCTL); ew32(RCTL, rctl & ~E1000_RCTL_EN); e_err("ME firmware caused invalid RDT - resetting\n"); schedule_work(&adapter->reset_task); } } static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) { struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_hw *hw = &adapter->hw; __ew32_prepare(hw); writel(i, tx_ring->tail); if (unlikely(i != readl(tx_ring->tail))) { u32 tctl = er32(TCTL); ew32(TCTL, tctl & ~E1000_TCTL_EN); e_err("ME firmware caused invalid TDT - resetting\n"); schedule_work(&adapter->reset_task); } } /** * e1000_alloc_rx_buffers - Replace used receive buffers * @rx_ring: Rx descriptor ring * @cleaned_count: number to reallocate * @gfp: flags for allocation **/ static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring, int cleaned_count, gfp_t gfp) { struct e1000_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union e1000_rx_desc_extended *rx_desc; struct e1000_buffer *buffer_info; struct sk_buff *skb; unsigned int i; unsigned int bufsz = adapter->rx_buffer_len; i = 
rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; while (cleaned_count--) { skb = buffer_info->skb; if (skb) { skb_trim(skb, 0); goto map_skb; } skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); if (!skb) { /* Better luck next round */ adapter->alloc_rx_buff_failed++; break; } buffer_info->skb = skb; map_skb: buffer_info->dma = dma_map_single(&pdev->dev, skb->data, adapter->rx_buffer_len, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { dev_err(&pdev->dev, "Rx DMA map failed\n"); adapter->rx_dma_failed++; break; } rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). */ wmb(); if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) e1000e_update_rdt_wa(rx_ring, i); else writel(i, rx_ring->tail); } i++; if (i == rx_ring->count) i = 0; buffer_info = &rx_ring->buffer_info[i]; } rx_ring->next_to_use = i; } /** * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split * @rx_ring: Rx descriptor ring * @cleaned_count: number to reallocate * @gfp: flags for allocation **/ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring, int cleaned_count, gfp_t gfp) { struct e1000_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union e1000_rx_desc_packet_split *rx_desc; struct e1000_buffer *buffer_info; struct e1000_ps_page *ps_page; struct sk_buff *skb; unsigned int i, j; i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; while (cleaned_count--) { rx_desc = E1000_RX_DESC_PS(*rx_ring, i); for (j = 0; j < PS_PAGE_BUFFERS; j++) { ps_page = &buffer_info->ps_pages[j]; if (j >= adapter->rx_ps_pages) { /* all unused desc entries get hw null ptr */ rx_desc->read.buffer_addr[j + 1] = ~cpu_to_le64(0); continue; } if (!ps_page->page) { ps_page->page = alloc_page(gfp); if (!ps_page->page) { adapter->alloc_rx_buff_failed++; goto no_buffers; } ps_page->dma = dma_map_page(&pdev->dev, ps_page->page, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, ps_page->dma)) { dev_err(&adapter->pdev->dev, "Rx DMA page map failed\n"); adapter->rx_dma_failed++; goto no_buffers; } } /* Refresh the desc even if buffer_addrs * didn't change because each write-back * erases this info. */ rx_desc->read.buffer_addr[j + 1] = cpu_to_le64(ps_page->dma); } skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0, gfp); if (!skb) { adapter->alloc_rx_buff_failed++; break; } buffer_info->skb = skb; buffer_info->dma = dma_map_single(&pdev->dev, skb->data, adapter->rx_ps_bsize0, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { dev_err(&pdev->dev, "Rx DMA map failed\n"); adapter->rx_dma_failed++; /* cleanup skb */ dev_kfree_skb_any(skb); buffer_info->skb = NULL; break; } rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). 
*/ wmb(); if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) e1000e_update_rdt_wa(rx_ring, i << 1); else writel(i << 1, rx_ring->tail); } i++; if (i == rx_ring->count) i = 0; buffer_info = &rx_ring->buffer_info[i]; } no_buffers: rx_ring->next_to_use = i; } /** * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers * @rx_ring: Rx descriptor ring * @cleaned_count: number of buffers to allocate this pass * @gfp: flags for allocation **/ static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring, int cleaned_count, gfp_t gfp) { struct e1000_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union e1000_rx_desc_extended *rx_desc; struct e1000_buffer *buffer_info; struct sk_buff *skb; unsigned int i; unsigned int bufsz = 256 - 16; /* for skb_reserve */ i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; while (cleaned_count--) { skb = buffer_info->skb; if (skb) { skb_trim(skb, 0); goto check_page; } skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); if (unlikely(!skb)) { /* Better luck next round */ adapter->alloc_rx_buff_failed++; break; } buffer_info->skb = skb; check_page: /* allocate a new page if necessary */ if (!buffer_info->page) { buffer_info->page = alloc_page(gfp); if (unlikely(!buffer_info->page)) { adapter->alloc_rx_buff_failed++; break; } } if (!buffer_info->dma) { buffer_info->dma = dma_map_page(&pdev->dev, buffer_info->page, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { adapter->alloc_rx_buff_failed++; break; } } rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); if (unlikely(++i == rx_ring->count)) i = 0; buffer_info = &rx_ring->buffer_info[i]; } if (likely(rx_ring->next_to_use != i)) { rx_ring->next_to_use = i; if (unlikely(i-- == 0)) i = (rx_ring->count - 1); /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). 
*/ wmb(); if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) e1000e_update_rdt_wa(rx_ring, i); else writel(i, rx_ring->tail); } } static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss, struct sk_buff *skb) { if (netdev->features & NETIF_F_RXHASH) skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3); } /** * e1000_clean_rx_irq - Send received data up the network stack * @rx_ring: Rx descriptor ring * @work_done: output parameter for indicating completed work * @work_to_do: how many packets we can clean * * the return value indicates whether actual cleaning was done, there * is no guarantee that everything was cleaned **/ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, int work_to_do) { struct e1000_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; union e1000_rx_desc_extended *rx_desc, *next_rxd; struct e1000_buffer *buffer_info, *next_buffer; u32 length, staterr; unsigned int i; int cleaned_count = 0; bool cleaned = false; unsigned int total_rx_bytes = 0, total_rx_packets = 0; i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); buffer_info = &rx_ring->buffer_info[i]; while (staterr & E1000_RXD_STAT_DD) { struct sk_buff *skb; if (*work_done >= work_to_do) break; (*work_done)++; dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ skb = buffer_info->skb; buffer_info->skb = NULL; prefetch(skb->data - NET_IP_ALIGN); i++; if (i == rx_ring->count) i = 0; next_rxd = E1000_RX_DESC_EXT(*rx_ring, i); prefetch(next_rxd); next_buffer = &rx_ring->buffer_info[i]; cleaned = true; cleaned_count++; dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_buffer_len, DMA_FROM_DEVICE); buffer_info->dma = 0; length = le16_to_cpu(rx_desc->wb.upper.length); /* !EOP means multiple descriptors were used to store a single * packet, if that's the case we need to toss it. 
In fact, we * need to toss every packet with the EOP bit clear and the * next frame that _does_ have the EOP bit set, as it is by * definition only a frame fragment */ if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) adapter->flags2 |= FLAG2_IS_DISCARDING; if (adapter->flags2 & FLAG2_IS_DISCARDING) { /* All receives must fit into a single buffer */ e_dbg("Receive packet consumed multiple buffers\n"); /* recycle */ buffer_info->skb = skb; if (staterr & E1000_RXD_STAT_EOP) adapter->flags2 &= ~FLAG2_IS_DISCARDING; goto next_desc; } if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && !(netdev->features & NETIF_F_RXALL))) { /* recycle */ buffer_info->skb = skb; goto next_desc; } /* adjust length to remove Ethernet CRC */ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { /* If configured to store CRC, don't subtract FCS, * but keep the FCS bytes out of the total_rx_bytes * counter */ if (netdev->features & NETIF_F_RXFCS) total_rx_bytes -= 4; else length -= 4; } total_rx_bytes += length; total_rx_packets++; /* code added for copybreak, this should improve * performance for small packets with large amounts * of reassembly being done in the stack */ if (length < copybreak) { struct sk_buff *new_skb = napi_alloc_skb(&adapter->napi, length); if (new_skb) { skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, (skb->data - NET_IP_ALIGN), (length + NET_IP_ALIGN)); /* save the skb in buffer_info as good */ buffer_info->skb = skb; skb = new_skb; } /* else just continue with the old one */ } /* end copybreak code */ skb_put(skb, length); /* Receive Checksum Offload */ e1000_rx_checksum(adapter, staterr, skb); e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); e1000_receive_skb(adapter, netdev, skb, staterr, rx_desc->wb.upper.vlan); next_desc: rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= E1000_RX_BUFFER_WRITE) { adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); cleaned_count = 0; } /* use prefetched values */ rx_desc = next_rxd; buffer_info = next_buffer; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); } rx_ring->next_to_clean = i; cleaned_count = e1000_desc_unused(rx_ring); if (cleaned_count) adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); adapter->total_rx_bytes += total_rx_bytes; adapter->total_rx_packets += total_rx_packets; return cleaned; } static void e1000_put_txbuf(struct e1000_ring *tx_ring, struct e1000_buffer *buffer_info, bool drop) { struct e1000_adapter *adapter = tx_ring->adapter; if (buffer_info->dma) { if (buffer_info->mapped_as_page) dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, buffer_info->length, DMA_TO_DEVICE); else dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, buffer_info->length, DMA_TO_DEVICE); buffer_info->dma = 0; } if (buffer_info->skb) { if (drop) dev_kfree_skb_any(buffer_info->skb); else dev_consume_skb_any(buffer_info->skb); buffer_info->skb = NULL; } buffer_info->time_stamp = 0; } static void e1000_print_hw_hang(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, print_hang_task); struct net_device *netdev = adapter->netdev; struct e1000_ring *tx_ring = adapter->tx_ring; unsigned int i = tx_ring->next_to_clean; unsigned int eop = tx_ring->buffer_info[i].next_to_watch; struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); struct e1000_hw *hw = &adapter->hw; u16 phy_status, phy_1000t_status, phy_ext_status; u16 pci_status; if (test_bit(__E1000_DOWN, &adapter->state)) 
return; if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { /* May be block on write-back, flush and detect again * flush pending descriptor writebacks to memory */ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); /* execute the writes immediately */ e1e_flush(); /* Due to rare timing issues, write to TIDV again to ensure * the write is successful */ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); /* execute the writes immediately */ e1e_flush(); adapter->tx_hang_recheck = true; return; } adapter->tx_hang_recheck = false; if (er32(TDH(0)) == er32(TDT(0))) { e_dbg("false hang detected, ignoring\n"); return; } /* Real hang detected */ netif_stop_queue(netdev); e1e_rphy(hw, MII_BMSR, &phy_status); e1e_rphy(hw, MII_STAT1000, &phy_1000t_status); e1e_rphy(hw, MII_ESTATUS, &phy_ext_status); pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); /* detected Hardware unit hang */ e_err("Detected Hardware Unit Hang:\n" " TDH <%x>\n" " TDT <%x>\n" " next_to_use <%x>\n" " next_to_clean <%x>\n" "buffer_info[next_to_clean]:\n" " time_stamp <%lx>\n" " next_to_watch <%x>\n" " jiffies <%lx>\n" " next_to_watch.status <%x>\n" "MAC Status <%x>\n" "PHY Status <%x>\n" "PHY 1000BASE-T Status <%x>\n" "PHY Extended Status <%x>\n" "PCI Status <%x>\n", readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use, tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp, eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), phy_status, phy_1000t_status, phy_ext_status, pci_status); e1000e_dump(adapter); /* Suggest workaround for known h/w issue */ if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) e_err("Try turning off Tx pause (flow control) via ethtool\n"); } /** * e1000e_tx_hwtstamp_work - check for Tx time stamp * @work: pointer to work struct * * This work function polls the TSYNCTXCTL valid bit to determine when a * timestamp has been taken for the current stored skb. The timestamp must * be for this skb because only one such packet is allowed in the queue. */ static void e1000e_tx_hwtstamp_work(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, tx_hwtstamp_work); struct e1000_hw *hw = &adapter->hw; if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) { struct sk_buff *skb = adapter->tx_hwtstamp_skb; struct skb_shared_hwtstamps shhwtstamps; u64 txstmp; txstmp = er32(TXSTMPL); txstmp |= (u64)er32(TXSTMPH) << 32; e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp); /* Clear the global tx_hwtstamp_skb pointer and force writes * prior to notifying the stack of a Tx timestamp. 
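	 * Once the pointer is NULL a subsequent transmit is free to queue
	 * its own timestamp request, so the clear must be visible before
	 * skb_tstamp_tx() notifies the stack.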
*/ adapter->tx_hwtstamp_skb = NULL; wmb(); /* force write prior to skb_tstamp_tx */ skb_tstamp_tx(skb, &shhwtstamps); dev_consume_skb_any(skb); } else if (time_after(jiffies, adapter->tx_hwtstamp_start + adapter->tx_timeout_factor * HZ)) { dev_kfree_skb_any(adapter->tx_hwtstamp_skb); adapter->tx_hwtstamp_skb = NULL; adapter->tx_hwtstamp_timeouts++; e_warn("clearing Tx timestamp hang\n"); } else { /* reschedule to check later */ schedule_work(&adapter->tx_hwtstamp_work); } } /** * e1000_clean_tx_irq - Reclaim resources after transmit completes * @tx_ring: Tx descriptor ring * * the return value indicates whether actual cleaning was done, there * is no guarantee that everything was cleaned **/ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) { struct e1000_adapter *adapter = tx_ring->adapter; struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; struct e1000_tx_desc *tx_desc, *eop_desc; struct e1000_buffer *buffer_info; unsigned int i, eop; unsigned int count = 0; unsigned int total_tx_bytes = 0, total_tx_packets = 0; unsigned int bytes_compl = 0, pkts_compl = 0; i = tx_ring->next_to_clean; eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && (count < tx_ring->count)) { bool cleaned = false; dma_rmb(); /* read buffer_info after eop_desc */ for (; !cleaned; count++) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; cleaned = (i == eop); if (cleaned) { total_tx_packets += buffer_info->segs; total_tx_bytes += buffer_info->bytecount; if (buffer_info->skb) { bytes_compl += buffer_info->skb->len; pkts_compl++; } } e1000_put_txbuf(tx_ring, buffer_info, false); tx_desc->upper.data = 0; i++; if (i == tx_ring->count) i = 0; } if (i == tx_ring->next_to_use) break; eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); } tx_ring->next_to_clean = i; netdev_completed_queue(netdev, pkts_compl, bytes_compl); #define TX_WAKE_THRESHOLD 32 if (count && netif_carrier_ok(netdev) && e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. 
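	 * The barrier pairs with the one in the transmit path, so a sender
	 * that has just stopped the queue cannot miss the wake below while
	 * free descriptors are actually available.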
*/ smp_mb(); if (netif_queue_stopped(netdev) && !(test_bit(__E1000_DOWN, &adapter->state))) { netif_wake_queue(netdev); ++adapter->restart_queue; } } if (adapter->detect_tx_hung) { /* Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of i */ adapter->detect_tx_hung = false; if (tx_ring->buffer_info[i].time_stamp && time_after(jiffies, tx_ring->buffer_info[i].time_stamp + (adapter->tx_timeout_factor * HZ)) && !(er32(STATUS) & E1000_STATUS_TXOFF)) schedule_work(&adapter->print_hang_task); else adapter->tx_hang_recheck = false; } adapter->total_tx_bytes += total_tx_bytes; adapter->total_tx_packets += total_tx_packets; return count < tx_ring->count; } /** * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split * @rx_ring: Rx descriptor ring * @work_done: output parameter for indicating completed work * @work_to_do: how many packets we can clean * * the return value indicates whether actual cleaning was done, there * is no guarantee that everything was cleaned **/ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, int work_to_do) { struct e1000_adapter *adapter = rx_ring->adapter; struct e1000_hw *hw = &adapter->hw; union e1000_rx_desc_packet_split *rx_desc, *next_rxd; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct e1000_buffer *buffer_info, *next_buffer; struct e1000_ps_page *ps_page; struct sk_buff *skb; unsigned int i, j; u32 length, staterr; int cleaned_count = 0; bool cleaned = false; unsigned int total_rx_bytes = 0, total_rx_packets = 0; i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC_PS(*rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.middle.status_error); buffer_info = &rx_ring->buffer_info[i]; while (staterr & E1000_RXD_STAT_DD) { if (*work_done >= work_to_do) break; (*work_done)++; skb = buffer_info->skb; dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ /* in the packet split case this is header only */ prefetch(skb->data - NET_IP_ALIGN); i++; if (i == rx_ring->count) i = 0; next_rxd = E1000_RX_DESC_PS(*rx_ring, i); prefetch(next_rxd); next_buffer = &rx_ring->buffer_info[i]; cleaned = true; cleaned_count++; dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_ps_bsize0, DMA_FROM_DEVICE); buffer_info->dma = 0; /* see !EOP comment in other Rx routine */ if (!(staterr & E1000_RXD_STAT_EOP)) adapter->flags2 |= FLAG2_IS_DISCARDING; if (adapter->flags2 & FLAG2_IS_DISCARDING) { e_dbg("Packet Split buffers didn't pick up the full packet\n"); dev_kfree_skb_irq(skb); if (staterr & E1000_RXD_STAT_EOP) adapter->flags2 &= ~FLAG2_IS_DISCARDING; goto next_desc; } if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && !(netdev->features & NETIF_F_RXALL))) { dev_kfree_skb_irq(skb); goto next_desc; } length = le16_to_cpu(rx_desc->wb.middle.length0); if (!length) { e_dbg("Last part of the packet spanning multiple descriptors\n"); dev_kfree_skb_irq(skb); goto next_desc; } /* Good Receive */ skb_put(skb, length); { /* this looks ugly, but it seems compiler issues make * it more efficient than reusing j */ int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); /* page alloc/put takes too long and effects small * packet throughput, so unsplit small packets and * save the alloc/put */ if (l1 && (l1 <= copybreak) && ((length + l1) <= adapter->rx_ps_bsize0)) { ps_page = &buffer_info->ps_pages[0]; dma_sync_single_for_cpu(&pdev->dev, ps_page->dma, PAGE_SIZE, DMA_FROM_DEVICE); memcpy(skb_tail_pointer(skb), page_address(ps_page->page), 
l1); dma_sync_single_for_device(&pdev->dev, ps_page->dma, PAGE_SIZE, DMA_FROM_DEVICE); /* remove the CRC */ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { if (!(netdev->features & NETIF_F_RXFCS)) l1 -= 4; } skb_put(skb, l1); goto copydone; } /* if */ } for (j = 0; j < PS_PAGE_BUFFERS; j++) { length = le16_to_cpu(rx_desc->wb.upper.length[j]); if (!length) break; ps_page = &buffer_info->ps_pages[j]; dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, DMA_FROM_DEVICE); ps_page->dma = 0; skb_fill_page_desc(skb, j, ps_page->page, 0, length); ps_page->page = NULL; skb->len += length; skb->data_len += length; skb->truesize += PAGE_SIZE; } /* strip the ethernet crc, problem is we're using pages now so * this whole operation can get a little cpu intensive */ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { if (!(netdev->features & NETIF_F_RXFCS)) pskb_trim(skb, skb->len - 4); } copydone: total_rx_bytes += skb->len; total_rx_packets++; e1000_rx_checksum(adapter, staterr, skb); e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); if (rx_desc->wb.upper.header_status & cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) adapter->rx_hdr_split++; e1000_receive_skb(adapter, netdev, skb, staterr, rx_desc->wb.middle.vlan); next_desc: rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); buffer_info->skb = NULL; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= E1000_RX_BUFFER_WRITE) { adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); cleaned_count = 0; } /* use prefetched values */ rx_desc = next_rxd; buffer_info = next_buffer; staterr = le32_to_cpu(rx_desc->wb.middle.status_error); } rx_ring->next_to_clean = i; cleaned_count = e1000_desc_unused(rx_ring); if (cleaned_count) adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); adapter->total_rx_bytes += total_rx_bytes; adapter->total_rx_packets += total_rx_packets; return cleaned; } static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, u16 length) { bi->page = NULL; skb->len += length; skb->data_len += length; skb->truesize += PAGE_SIZE; } /** * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy * @rx_ring: Rx descriptor ring * @work_done: output parameter for indicating completed work * @work_to_do: how many packets we can clean * * the return value indicates whether actual cleaning was done, there * is no guarantee that everything was cleaned **/ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, int work_to_do) { struct e1000_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union e1000_rx_desc_extended *rx_desc, *next_rxd; struct e1000_buffer *buffer_info, *next_buffer; u32 length, staterr; unsigned int i; int cleaned_count = 0; bool cleaned = false; unsigned int total_rx_bytes = 0, total_rx_packets = 0; struct skb_shared_info *shinfo; i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); buffer_info = &rx_ring->buffer_info[i]; while (staterr & E1000_RXD_STAT_DD) { struct sk_buff *skb; if (*work_done >= work_to_do) break; (*work_done)++; dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ skb = buffer_info->skb; buffer_info->skb = NULL; ++i; if (i == rx_ring->count) i = 0; next_rxd = E1000_RX_DESC_EXT(*rx_ring, i); prefetch(next_rxd); next_buffer = &rx_ring->buffer_info[i]; cleaned = true; cleaned_count++; dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, DMA_FROM_DEVICE); 
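		/* From here on the frame is reassembled: descriptors without
		 * EOP have their page chained onto rx_ring->rx_skb_top, and
		 * the skb is only passed to the stack once the EOP
		 * descriptor completes the chain.
		 */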
buffer_info->dma = 0; length = le16_to_cpu(rx_desc->wb.upper.length); /* errors is only valid for DD + EOP descriptors */ if (unlikely((staterr & E1000_RXD_STAT_EOP) && ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && !(netdev->features & NETIF_F_RXALL)))) { /* recycle both page and skb */ buffer_info->skb = skb; /* an error means any chain goes out the window too */ if (rx_ring->rx_skb_top) dev_kfree_skb_irq(rx_ring->rx_skb_top); rx_ring->rx_skb_top = NULL; goto next_desc; } #define rxtop (rx_ring->rx_skb_top) if (!(staterr & E1000_RXD_STAT_EOP)) { /* this descriptor is only the beginning (or middle) */ if (!rxtop) { /* this is the beginning of a chain */ rxtop = skb; skb_fill_page_desc(rxtop, 0, buffer_info->page, 0, length); } else { /* this is the middle of a chain */ shinfo = skb_shinfo(rxtop); skb_fill_page_desc(rxtop, shinfo->nr_frags, buffer_info->page, 0, length); /* re-use the skb, only consumed the page */ buffer_info->skb = skb; } e1000_consume_page(buffer_info, rxtop, length); goto next_desc; } else { if (rxtop) { /* end of the chain */ shinfo = skb_shinfo(rxtop); skb_fill_page_desc(rxtop, shinfo->nr_frags, buffer_info->page, 0, length); /* re-use the current skb, we only consumed the * page */ buffer_info->skb = skb; skb = rxtop; rxtop = NULL; e1000_consume_page(buffer_info, skb, length); } else { /* no chain, got EOP, this buf is the packet * copybreak to save the put_page/alloc_page */ if (length <= copybreak && skb_tailroom(skb) >= length) { memcpy(skb_tail_pointer(skb), page_address(buffer_info->page), length); /* re-use the page, so don't erase * buffer_info->page */ skb_put(skb, length); } else { skb_fill_page_desc(skb, 0, buffer_info->page, 0, length); e1000_consume_page(buffer_info, skb, length); } } } /* Receive Checksum Offload */ e1000_rx_checksum(adapter, staterr, skb); e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; total_rx_packets++; /* eth type trans needs skb->data to point to something */ if (!pskb_may_pull(skb, ETH_HLEN)) { e_err("pskb_may_pull failed.\n"); dev_kfree_skb_irq(skb); goto next_desc; } e1000_receive_skb(adapter, netdev, skb, staterr, rx_desc->wb.upper.vlan); next_desc: rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); /* return some buffers to hardware, one at a time is too slow */ if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); cleaned_count = 0; } /* use prefetched values */ rx_desc = next_rxd; buffer_info = next_buffer; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); } rx_ring->next_to_clean = i; cleaned_count = e1000_desc_unused(rx_ring); if (cleaned_count) adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); adapter->total_rx_bytes += total_rx_bytes; adapter->total_rx_packets += total_rx_packets; return cleaned; } /** * e1000_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: Rx descriptor ring **/ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) { struct e1000_adapter *adapter = rx_ring->adapter; struct e1000_buffer *buffer_info; struct e1000_ps_page *ps_page; struct pci_dev *pdev = adapter->pdev; unsigned int i, j; /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; if (buffer_info->dma) { if (adapter->clean_rx == e1000_clean_rx_irq) dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_buffer_len, DMA_FROM_DEVICE); else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) 
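			/* the jumbo path maps one full page per descriptor */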
dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, DMA_FROM_DEVICE); else if (adapter->clean_rx == e1000_clean_rx_irq_ps) dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_ps_bsize0, DMA_FROM_DEVICE); buffer_info->dma = 0; } if (buffer_info->page) { put_page(buffer_info->page); buffer_info->page = NULL; } if (buffer_info->skb) { dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; } for (j = 0; j < PS_PAGE_BUFFERS; j++) { ps_page = &buffer_info->ps_pages[j]; if (!ps_page->page) break; dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, DMA_FROM_DEVICE); ps_page->dma = 0; put_page(ps_page->page); ps_page->page = NULL; } } /* there also may be some cached data from a chained receive */ if (rx_ring->rx_skb_top) { dev_kfree_skb(rx_ring->rx_skb_top); rx_ring->rx_skb_top = NULL; } /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; adapter->flags2 &= ~FLAG2_IS_DISCARDING; } static void e1000e_downshift_workaround(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, downshift_task); if (test_bit(__E1000_DOWN, &adapter->state)) return; e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); } /** * e1000_intr_msi - Interrupt Handler * @irq: interrupt number * @data: pointer to a network interface device structure **/ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 icr = er32(ICR); /* read ICR disables interrupts using IAM */ if (icr & E1000_ICR_LSC) { hw->mac.get_link_status = true; /* ICH8 workaround-- Call gig speed drop workaround on cable * disconnect (LSC) before accessing any PHY registers */ if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && (!(er32(STATUS) & E1000_STATUS_LU))) schedule_work(&adapter->downshift_task); /* 80003ES2LAN workaround-- For packet buffer work-around on * link down event; disable receives here in the ISR and reset * adapter in watchdog */ if (netif_carrier_ok(netdev) && adapter->flags & FLAG_RX_NEEDS_RESTART) { /* disable receives */ u32 rctl = er32(RCTL); ew32(RCTL, rctl & ~E1000_RCTL_EN); adapter->flags |= FLAG_RESTART_NOW; } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); } /* Reset on uncorrectable ECC error */ if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; adapter->uncorr_errors += (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; /* Do the reset outside of interrupt context */ schedule_work(&adapter->reset_task); /* return immediately since reset is imminent */ return IRQ_HANDLED; } if (napi_schedule_prep(&adapter->napi)) { adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; adapter->total_rx_bytes = 0; adapter->total_rx_packets = 0; __napi_schedule(&adapter->napi); } return IRQ_HANDLED; } /** * e1000_intr - Interrupt Handler * @irq: interrupt number * @data: pointer to a network interface device structure **/ static irqreturn_t e1000_intr(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 rctl, icr = er32(ICR); if (!icr || test_bit(__E1000_DOWN, &adapter->state)) return IRQ_NONE; /* Not our 
interrupt */ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is * not set, then the adapter didn't send an interrupt */ if (!(icr & E1000_ICR_INT_ASSERTED)) return IRQ_NONE; /* Interrupt Auto-Mask...upon reading ICR, * interrupts are masked. No need for the * IMC write */ if (icr & E1000_ICR_LSC) { hw->mac.get_link_status = true; /* ICH8 workaround-- Call gig speed drop workaround on cable * disconnect (LSC) before accessing any PHY registers */ if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && (!(er32(STATUS) & E1000_STATUS_LU))) schedule_work(&adapter->downshift_task); /* 80003ES2LAN workaround-- * For packet buffer work-around on link down event; * disable receives here in the ISR and * reset adapter in watchdog */ if (netif_carrier_ok(netdev) && (adapter->flags & FLAG_RX_NEEDS_RESTART)) { /* disable receives */ rctl = er32(RCTL); ew32(RCTL, rctl & ~E1000_RCTL_EN); adapter->flags |= FLAG_RESTART_NOW; } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); } /* Reset on uncorrectable ECC error */ if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; adapter->uncorr_errors += (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; /* Do the reset outside of interrupt context */ schedule_work(&adapter->reset_task); /* return immediately since reset is imminent */ return IRQ_HANDLED; } if (napi_schedule_prep(&adapter->napi)) { adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; adapter->total_rx_bytes = 0; adapter->total_rx_packets = 0; __napi_schedule(&adapter->napi); } return IRQ_HANDLED; } static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 icr = er32(ICR); if (icr & adapter->eiac_mask) ew32(ICS, (icr & adapter->eiac_mask)); if (icr & E1000_ICR_LSC) { hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); } if (!test_bit(__E1000_DOWN, &adapter->state)) ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK); return IRQ_HANDLED; } static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct e1000_ring *tx_ring = adapter->tx_ring; adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; if (!e1000_clean_tx_irq(tx_ring)) /* Ring was not completely cleaned, so fire another interrupt */ ew32(ICS, tx_ring->ims_val); if (!test_bit(__E1000_DOWN, &adapter->state)) ew32(IMS, adapter->tx_ring->ims_val); return IRQ_HANDLED; } static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_ring *rx_ring = adapter->rx_ring; /* Write the ITR value calculated at the end of the * previous interrupt. */ if (rx_ring->set_itr) { u32 itr = rx_ring->itr_val ? 
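		/* itr_val is in interrupts/sec while the EITR register is
		 * programmed in 256 ns units, hence the conversion
		 * 1000000000 / (itr_val * 256); an itr_val of 20000 ints/s,
		 * for example, works out to roughly 195.
		 */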
1000000000 / (rx_ring->itr_val * 256) : 0; writel(itr, rx_ring->itr_register); rx_ring->set_itr = 0; } if (napi_schedule_prep(&adapter->napi)) { adapter->total_rx_bytes = 0; adapter->total_rx_packets = 0; __napi_schedule(&adapter->napi); } return IRQ_HANDLED; } /** * e1000_configure_msix - Configure MSI-X hardware * @adapter: board private structure * * e1000_configure_msix sets up the hardware to properly * generate MSI-X interrupts. **/ static void e1000_configure_msix(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_ring *rx_ring = adapter->rx_ring; struct e1000_ring *tx_ring = adapter->tx_ring; int vector = 0; u32 ctrl_ext, ivar = 0; adapter->eiac_mask = 0; /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ if (hw->mac.type == e1000_82574) { u32 rfctl = er32(RFCTL); rfctl |= E1000_RFCTL_ACK_DIS; ew32(RFCTL, rfctl); } /* Configure Rx vector */ rx_ring->ims_val = E1000_IMS_RXQ0; adapter->eiac_mask |= rx_ring->ims_val; if (rx_ring->itr_val) writel(1000000000 / (rx_ring->itr_val * 256), rx_ring->itr_register); else writel(1, rx_ring->itr_register); ivar = E1000_IVAR_INT_ALLOC_VALID | vector; /* Configure Tx vector */ tx_ring->ims_val = E1000_IMS_TXQ0; vector++; if (tx_ring->itr_val) writel(1000000000 / (tx_ring->itr_val * 256), tx_ring->itr_register); else writel(1, tx_ring->itr_register); adapter->eiac_mask |= tx_ring->ims_val; ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); /* set vector for Other Causes, e.g. link changes */ vector++; ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); if (rx_ring->itr_val) writel(1000000000 / (rx_ring->itr_val * 256), hw->hw_addr + E1000_EITR_82574(vector)); else writel(1, hw->hw_addr + E1000_EITR_82574(vector)); /* Cause Tx interrupts on every write back */ ivar |= BIT(31); ew32(IVAR, ivar); /* enable MSI-X PBA support */ ctrl_ext = er32(CTRL_EXT) & ~E1000_CTRL_EXT_IAME; ctrl_ext |= E1000_CTRL_EXT_PBA_CLR | E1000_CTRL_EXT_EIAME; ew32(CTRL_EXT, ctrl_ext); e1e_flush(); } void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) { if (adapter->msix_entries) { pci_disable_msix(adapter->pdev); kfree(adapter->msix_entries); adapter->msix_entries = NULL; } else if (adapter->flags & FLAG_MSI_ENABLED) { pci_disable_msi(adapter->pdev); adapter->flags &= ~FLAG_MSI_ENABLED; } } /** * e1000e_set_interrupt_capability - set MSI or MSI-X if supported * @adapter: board private structure * * Attempt to configure interrupts using the best available * capabilities of the hardware and kernel. **/ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) { int err; int i; switch (adapter->int_mode) { case E1000E_INT_MODE_MSIX: if (adapter->flags & FLAG_HAS_MSIX) { adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ adapter->msix_entries = kcalloc(adapter->num_vectors, sizeof(struct msix_entry), GFP_KERNEL); if (adapter->msix_entries) { struct e1000_adapter *a = adapter; for (i = 0; i < adapter->num_vectors; i++) adapter->msix_entries[i].entry = i; err = pci_enable_msix_range(a->pdev, a->msix_entries, a->num_vectors, a->num_vectors); if (err > 0) return; } /* MSI-X failed, so fall through and try MSI */ e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n"); e1000e_reset_interrupt_capability(adapter); } adapter->int_mode = E1000E_INT_MODE_MSI; fallthrough; case E1000E_INT_MODE_MSI: if (!pci_enable_msi(adapter->pdev)) { adapter->flags |= FLAG_MSI_ENABLED; } else { adapter->int_mode = E1000E_INT_MODE_LEGACY; e_err("Failed to initialize MSI interrupts. 
Falling back to legacy interrupts.\n"); } fallthrough; case E1000E_INT_MODE_LEGACY: /* Don't do anything; this is the system default */ break; } /* store the number of vectors being used */ adapter->num_vectors = 1; } /** * e1000_request_msix - Initialize MSI-X interrupts * @adapter: board private structure * * e1000_request_msix allocates MSI-X vectors and requests interrupts from the * kernel. **/ static int e1000_request_msix(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err = 0, vector = 0; if (strlen(netdev->name) < (IFNAMSIZ - 5)) snprintf(adapter->rx_ring->name, sizeof(adapter->rx_ring->name) - 1, "%.14s-rx-0", netdev->name); else memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); err = request_irq(adapter->msix_entries[vector].vector, e1000_intr_msix_rx, 0, adapter->rx_ring->name, netdev); if (err) return err; adapter->rx_ring->itr_register = adapter->hw.hw_addr + E1000_EITR_82574(vector); adapter->rx_ring->itr_val = adapter->itr; vector++; if (strlen(netdev->name) < (IFNAMSIZ - 5)) snprintf(adapter->tx_ring->name, sizeof(adapter->tx_ring->name) - 1, "%.14s-tx-0", netdev->name); else memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); err = request_irq(adapter->msix_entries[vector].vector, e1000_intr_msix_tx, 0, adapter->tx_ring->name, netdev); if (err) return err; adapter->tx_ring->itr_register = adapter->hw.hw_addr + E1000_EITR_82574(vector); adapter->tx_ring->itr_val = adapter->itr; vector++; err = request_irq(adapter->msix_entries[vector].vector, e1000_msix_other, 0, netdev->name, netdev); if (err) return err; e1000_configure_msix(adapter); return 0; } /** * e1000_request_irq - initialize interrupts * @adapter: board private structure * * Attempts to configure interrupts using the best available * capabilities of the hardware and kernel. 
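 * The order of preference is MSI-X, then MSI, then legacy shared
 * interrupts; each failed request drops down one level.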
**/ static int e1000_request_irq(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err; if (adapter->msix_entries) { err = e1000_request_msix(adapter); if (!err) return err; /* fall back to MSI */ e1000e_reset_interrupt_capability(adapter); adapter->int_mode = E1000E_INT_MODE_MSI; e1000e_set_interrupt_capability(adapter); } if (adapter->flags & FLAG_MSI_ENABLED) { err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, netdev->name, netdev); if (!err) return err; /* fall back to legacy interrupt */ e1000e_reset_interrupt_capability(adapter); adapter->int_mode = E1000E_INT_MODE_LEGACY; } err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, netdev->name, netdev); if (err) e_err("Unable to allocate interrupt, Error: %d\n", err); return err; } static void e1000_free_irq(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; if (adapter->msix_entries) { int vector = 0; free_irq(adapter->msix_entries[vector].vector, netdev); vector++; free_irq(adapter->msix_entries[vector].vector, netdev); vector++; /* Other Causes interrupt vector */ free_irq(adapter->msix_entries[vector].vector, netdev); return; } free_irq(adapter->pdev->irq, netdev); } /** * e1000_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure **/ static void e1000_irq_disable(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; ew32(IMC, ~0); if (adapter->msix_entries) ew32(EIAC_82574, 0); e1e_flush(); if (adapter->msix_entries) { int i; for (i = 0; i < adapter->num_vectors; i++) synchronize_irq(adapter->msix_entries[i].vector); } else { synchronize_irq(adapter->pdev->irq); } } /** * e1000_irq_enable - Enable default interrupt generation settings * @adapter: board private structure **/ static void e1000_irq_enable(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; if (adapter->msix_entries) { ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | IMS_OTHER_MASK); } else if (hw->mac.type >= e1000_pch_lpt) { ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); } else { ew32(IMS, IMS_ENABLE_MASK); } e1e_flush(); } /** * e1000e_get_hw_control - get control of the h/w from f/w * @adapter: address of board private structure * * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that * the driver is loaded. For AMT version (only with 82573) * of the f/w this means that the network i/f is open. **/ void e1000e_get_hw_control(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl_ext; u32 swsm; /* Let firmware know the driver has taken over */ if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { swsm = er32(SWSM); ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { ctrl_ext = er32(CTRL_EXT); ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); } } /** * e1000e_release_hw_control - release control of the h/w to f/w * @adapter: address of board private structure * * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that the * driver is no longer loaded. For AMT version (only with 82573) i * of the f/w this means that the network i/f is closed. 
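 * With DRV_LOAD cleared, manageability firmware (ASF/AMT) resumes
 * control of the hardware until the driver claims it again.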
* **/ void e1000e_release_hw_control(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl_ext; u32 swsm; /* Let firmware taken over control of h/w */ if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { swsm = er32(SWSM); ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { ctrl_ext = er32(CTRL_EXT); ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); } } /** * e1000_alloc_ring_dma - allocate memory for a ring structure * @adapter: board private structure * @ring: ring struct for which to allocate dma **/ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, struct e1000_ring *ring) { struct pci_dev *pdev = adapter->pdev; ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, GFP_KERNEL); if (!ring->desc) return -ENOMEM; return 0; } /** * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) * @tx_ring: Tx descriptor ring * * Return 0 on success, negative on failure **/ int e1000e_setup_tx_resources(struct e1000_ring *tx_ring) { struct e1000_adapter *adapter = tx_ring->adapter; int err = -ENOMEM, size; size = sizeof(struct e1000_buffer) * tx_ring->count; tx_ring->buffer_info = vzalloc(size); if (!tx_ring->buffer_info) goto err; /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); err = e1000_alloc_ring_dma(adapter, tx_ring); if (err) goto err; tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; return 0; err: vfree(tx_ring->buffer_info); e_err("Unable to allocate memory for the transmit descriptor ring\n"); return err; } /** * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) * @rx_ring: Rx descriptor ring * * Returns 0 on success, negative on failure **/ int e1000e_setup_rx_resources(struct e1000_ring *rx_ring) { struct e1000_adapter *adapter = rx_ring->adapter; struct e1000_buffer *buffer_info; int i, size, desc_len, err = -ENOMEM; size = sizeof(struct e1000_buffer) * rx_ring->count; rx_ring->buffer_info = vzalloc(size); if (!rx_ring->buffer_info) goto err; for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, sizeof(struct e1000_ps_page), GFP_KERNEL); if (!buffer_info->ps_pages) goto err_pages; } desc_len = sizeof(union e1000_rx_desc_packet_split); /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * desc_len; rx_ring->size = ALIGN(rx_ring->size, 4096); err = e1000_alloc_ring_dma(adapter, rx_ring); if (err) goto err_pages; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; rx_ring->rx_skb_top = NULL; return 0; err_pages: for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; kfree(buffer_info->ps_pages); } err: vfree(rx_ring->buffer_info); e_err("Unable to allocate memory for the receive descriptor ring\n"); return err; } /** * e1000_clean_tx_ring - Free Tx Buffers * @tx_ring: Tx descriptor ring **/ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) { struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_buffer *buffer_info; unsigned long size; unsigned int i; for (i = 0; i < tx_ring->count; i++) { buffer_info = &tx_ring->buffer_info[i]; e1000_put_txbuf(tx_ring, buffer_info, false); } netdev_reset_queue(adapter->netdev); size = sizeof(struct e1000_buffer) * tx_ring->count; memset(tx_ring->buffer_info, 0, size); memset(tx_ring->desc, 0, tx_ring->size); tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; } /** * e1000e_free_tx_resources - Free Tx Resources per Queue * 
@tx_ring: Tx descriptor ring * * Free all transmit software resources **/ void e1000e_free_tx_resources(struct e1000_ring *tx_ring) { struct e1000_adapter *adapter = tx_ring->adapter; struct pci_dev *pdev = adapter->pdev; e1000_clean_tx_ring(tx_ring); vfree(tx_ring->buffer_info); tx_ring->buffer_info = NULL; dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; } /** * e1000e_free_rx_resources - Free Rx Resources * @rx_ring: Rx descriptor ring * * Free all receive software resources **/ void e1000e_free_rx_resources(struct e1000_ring *rx_ring) { struct e1000_adapter *adapter = rx_ring->adapter; struct pci_dev *pdev = adapter->pdev; int i; e1000_clean_rx_ring(rx_ring); for (i = 0; i < rx_ring->count; i++) kfree(rx_ring->buffer_info[i].ps_pages); vfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } /** * e1000_update_itr - update the dynamic ITR value based on statistics * @itr_setting: current adapter->itr * @packets: the number of packets during this measurement interval * @bytes: the number of bytes during this measurement interval * * Stores a new ITR value based on packets and byte * counts during the last interrupt. The advantage of per interrupt * computation is faster updates and more accurate ITR for the current * traffic pattern. Constants in this function were computed * based on theoretical maximum wire speed and thresholds were set based * on testing data as well as attempting to minimize response time * while increasing bulk throughput. This functionality is controlled * by the InterruptThrottleRate module parameter. **/ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) { unsigned int retval = itr_setting; if (packets == 0) return itr_setting; switch (itr_setting) { case lowest_latency: /* handle TSO and jumbo frames */ if (bytes / packets > 8000) retval = bulk_latency; else if ((packets < 5) && (bytes > 512)) retval = low_latency; break; case low_latency: /* 50 usec aka 20000 ints/s */ if (bytes > 10000) { /* this if handles the TSO accounting */ if (bytes / packets > 8000) retval = bulk_latency; else if ((packets < 10) || ((bytes / packets) > 1200)) retval = bulk_latency; else if ((packets > 35)) retval = lowest_latency; } else if (bytes / packets > 2000) { retval = bulk_latency; } else if (packets <= 2 && bytes < 512) { retval = lowest_latency; } break; case bulk_latency: /* 250 usec aka 4000 ints/s */ if (bytes > 25000) { if (packets > 35) retval = low_latency; } else if (bytes < 6000) { retval = low_latency; } break; } return retval; } static void e1000_set_itr(struct e1000_adapter *adapter) { u16 current_itr; u32 new_itr = adapter->itr; /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ if (adapter->link_speed != SPEED_1000) { new_itr = 4000; goto set_itr_now; } if (adapter->flags2 & FLAG2_DISABLE_AIM) { new_itr = 0; goto set_itr_now; } adapter->tx_itr = e1000_update_itr(adapter->tx_itr, adapter->total_tx_packets, adapter->total_tx_bytes); /* conservative mode (itr 3) eliminates the lowest_latency setting */ if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) adapter->tx_itr = low_latency; adapter->rx_itr = e1000_update_itr(adapter->rx_itr, adapter->total_rx_packets, adapter->total_rx_bytes); /* conservative mode (itr 3) eliminates the lowest_latency setting */ if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) adapter->rx_itr = low_latency; current_itr 
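	/* The direction that looks more like bulk traffic wins, selecting
	 * the lower interrupt rate: lowest_latency, low_latency and
	 * bulk_latency map to 70000, 20000 and 4000 ints/s below.
	 */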
= max(adapter->rx_itr, adapter->tx_itr); /* counts and packets in update_itr are dependent on these numbers */ switch (current_itr) { case lowest_latency: new_itr = 70000; break; case low_latency: new_itr = 20000; /* aka hwitr = ~200 */ break; case bulk_latency: new_itr = 4000; break; default: break; } set_itr_now: if (new_itr != adapter->itr) { /* this attempts to bias the interrupt rate towards Bulk * by adding intermediate steps when interrupt rate is * increasing */ new_itr = new_itr > adapter->itr ? min(adapter->itr + (new_itr >> 2), new_itr) : new_itr; adapter->itr = new_itr; adapter->rx_ring->itr_val = new_itr; if (adapter->msix_entries) adapter->rx_ring->set_itr = 1; else e1000e_write_itr(adapter, new_itr); } } /** * e1000e_write_itr - write the ITR value to the appropriate registers * @adapter: address of board private structure * @itr: new ITR value to program * * e1000e_write_itr determines if the adapter is in MSI-X mode * and, if so, writes the EITR registers with the ITR value. * Otherwise, it writes the ITR value into the ITR register. **/ void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr) { struct e1000_hw *hw = &adapter->hw; u32 new_itr = itr ? 1000000000 / (itr * 256) : 0; if (adapter->msix_entries) { int vector; for (vector = 0; vector < adapter->num_vectors; vector++) writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector)); } else { ew32(ITR, new_itr); } } /** * e1000_alloc_queues - Allocate memory for all rings * @adapter: board private structure to initialize **/ static int e1000_alloc_queues(struct e1000_adapter *adapter) { int size = sizeof(struct e1000_ring); adapter->tx_ring = kzalloc(size, GFP_KERNEL); if (!adapter->tx_ring) goto err; adapter->tx_ring->count = adapter->tx_ring_count; adapter->tx_ring->adapter = adapter; adapter->rx_ring = kzalloc(size, GFP_KERNEL); if (!adapter->rx_ring) goto err; adapter->rx_ring->count = adapter->rx_ring_count; adapter->rx_ring->adapter = adapter; return 0; err: e_err("Unable to allocate memory for queues\n"); kfree(adapter->rx_ring); kfree(adapter->tx_ring); return -ENOMEM; } /** * e1000e_poll - NAPI Rx polling callback * @napi: struct associated with this polling callback * @budget: number of packets driver is allowed to process this poll **/ static int e1000e_poll(struct napi_struct *napi, int budget) { struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); struct e1000_hw *hw = &adapter->hw; struct net_device *poll_dev = adapter->netdev; int tx_cleaned = 1, work_done = 0; adapter = netdev_priv(poll_dev); if (!adapter->msix_entries || (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); adapter->clean_rx(adapter->rx_ring, &work_done, budget); if (!tx_cleaned || work_done == budget) return budget; /* Exit the polling mode, but don't re-enable interrupts if stack might * poll us due to busy-polling */ if (likely(napi_complete_done(napi, work_done))) { if (adapter->itr_setting & 3) e1000_set_itr(adapter); if (!test_bit(__E1000_DOWN, &adapter->state)) { if (adapter->msix_entries) ew32(IMS, adapter->rx_ring->ims_val); else e1000_irq_enable(adapter); } } return work_done; } static int e1000_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 vfta, index; /* don't update vlan cookie if already programmed */ if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && (vid == adapter->mng_vlan_id)) return 
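	/* The management VLAN was already programmed via the DHCP cookie,
	 * so it is not re-added here.  For any other VID the VFTA is a
	 * 128-entry array of 32-bit bitmaps: VID 100, for example, lands
	 * in register (100 >> 5) & 0x7F = 3, bit 100 & 0x1F = 4.
	 */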
0; /* add VID to filter table */ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { index = (vid >> 5) & 0x7F; vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); vfta |= BIT((vid & 0x1F)); hw->mac.ops.write_vfta(hw, index, vfta); } set_bit(vid, adapter->active_vlans); return 0; } static int e1000_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 vfta, index; if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && (vid == adapter->mng_vlan_id)) { /* release control to f/w */ e1000e_release_hw_control(adapter); return 0; } /* remove VID from filter table */ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { index = (vid >> 5) & 0x7F; vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); vfta &= ~BIT((vid & 0x1F)); hw->mac.ops.write_vfta(hw, index, vfta); } clear_bit(vid, adapter->active_vlans); return 0; } /** * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering * @adapter: board private structure to initialize **/ static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; u32 rctl; if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { /* disable VLAN receive filtering */ rctl = er32(RCTL); rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN); ew32(RCTL, rctl); if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), adapter->mng_vlan_id); adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; } } } /** * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering * @adapter: board private structure to initialize **/ static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 rctl; if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { /* enable VLAN receive filtering */ rctl = er32(RCTL); rctl |= E1000_RCTL_VFE; rctl &= ~E1000_RCTL_CFIEN; ew32(RCTL, rctl); } } /** * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping * @adapter: board private structure to initialize **/ static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl; /* disable VLAN tag insert/strip */ ctrl = er32(CTRL); ctrl &= ~E1000_CTRL_VME; ew32(CTRL, ctrl); } /** * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping * @adapter: board private structure to initialize **/ static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl; /* enable VLAN tag insert/strip */ ctrl = er32(CTRL); ctrl |= E1000_CTRL_VME; ew32(CTRL, ctrl); } static void e1000_update_mng_vlan(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; u16 vid = adapter->hw.mng_cookie.vlan_id; u16 old_vid = adapter->mng_vlan_id; if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); adapter->mng_vlan_id = vid; } if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid); } static void e1000_restore_vlan(struct e1000_adapter *adapter) { u16 vid; e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } static void e1000_init_manageability_pt(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 manc, manc2h, mdef, 
i, j; if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) return; manc = er32(MANC); /* enable receiving management packets to the host. this will probably * generate destination unreachable messages from the host OS, but * the packets will be handled on SMBUS */ manc |= E1000_MANC_EN_MNG2HOST; manc2h = er32(MANC2H); switch (hw->mac.type) { default: manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664); break; case e1000_82574: case e1000_82583: /* Check if IPMI pass-through decision filter already exists; * if so, enable it. */ for (i = 0, j = 0; i < 8; i++) { mdef = er32(MDEF(i)); /* Ignore filters with anything other than IPMI ports */ if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) continue; /* Enable this decision filter in MANC2H */ if (mdef) manc2h |= BIT(i); j |= mdef; } if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) break; /* Create new decision filter in an empty filter */ for (i = 0, j = 0; i < 8; i++) if (er32(MDEF(i)) == 0) { ew32(MDEF(i), (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)); manc2h |= BIT(1); j++; break; } if (!j) e_warn("Unable to create IPMI pass-through filter\n"); break; } ew32(MANC2H, manc2h); ew32(MANC, manc); } /** * e1000_configure_tx - Configure Transmit Unit after Reset * @adapter: board private structure * * Configure the Tx unit of the MAC after a reset. **/ static void e1000_configure_tx(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_ring *tx_ring = adapter->tx_ring; u64 tdba; u32 tdlen, tctl, tarc; /* Setup the HW Tx Head and Tail descriptor pointers */ tdba = tx_ring->dma; tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32))); ew32(TDBAH(0), (tdba >> 32)); ew32(TDLEN(0), tdlen); ew32(TDH(0), 0); ew32(TDT(0), 0); tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); writel(0, tx_ring->head); if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) e1000e_update_tdt_wa(tx_ring, 0); else writel(0, tx_ring->tail); /* Set the Tx Interrupt Delay register */ ew32(TIDV, adapter->tx_int_delay); /* Tx irq moderation */ ew32(TADV, adapter->tx_abs_int_delay); if (adapter->flags2 & FLAG2_DMA_BURST) { u32 txdctl = er32(TXDCTL(0)); txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | E1000_TXDCTL_WTHRESH); /* set up some performance related parameters to encourage the * hardware to use the bus more efficiently in bursts, depends * on the tx_int_delay to be enabled, * wthresh = 1 ==> burst write is disabled to avoid Tx stalls * hthresh = 1 ==> prefetch when one or more available * pthresh = 0x1f ==> prefetch if internal cache 31 or less * BEWARE: this seems to work but should be considered first if * there are Tx hangs or other Tx related bugs */ txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; ew32(TXDCTL(0), txdctl); } /* erratum work around: set txdctl the same for both queues */ ew32(TXDCTL(1), er32(TXDCTL(0))); /* Program the Transmit Control Register */ tctl = er32(TCTL); tctl &= ~E1000_TCTL_CT; tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { tarc = er32(TARC(0)); /* set the speed mode bit, we'll clear it if we're not at * gigabit link later */ #define SPEED_MODE_BIT BIT(21) tarc |= SPEED_MODE_BIT; ew32(TARC(0), tarc); } /* errata: program both queues to unweighted RR */ if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { tarc = er32(TARC(0)); tarc |= 1; ew32(TARC(0), tarc); tarc = er32(TARC(1)); tarc |= 1; ew32(TARC(1), tarc); } /* Setup Transmit 
Descriptor Settings for eop descriptor */ adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; /* only set IDE if we are delaying interrupts using the timers */ if (adapter->tx_int_delay) adapter->txd_cmd |= E1000_TXD_CMD_IDE; /* enable Report Status bit */ adapter->txd_cmd |= E1000_TXD_CMD_RS; ew32(TCTL, tctl); hw->mac.ops.config_collision_dist(hw); /* SPT and KBL Si errata workaround to avoid data corruption */ if (hw->mac.type == e1000_pch_spt) { u32 reg_val; reg_val = er32(IOSFPC); reg_val |= E1000_RCTL_RDMTS_HEX; ew32(IOSFPC, reg_val); reg_val = er32(TARC(0)); /* SPT and KBL Si errata workaround to avoid Tx hang. * Dropping the number of outstanding requests from * 3 to 2 in order to avoid a buffer overrun. */ reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ; reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ; ew32(TARC(0), reg_val); } } #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) /** * e1000_setup_rctl - configure the receive control registers * @adapter: Board private structure **/ static void e1000_setup_rctl(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 rctl, rfctl; u32 pages = 0; /* Workaround Si errata on PCHx - configure jumbo frame flow. * If jumbo frames not set, program related MAC/PHY registers * to h/w defaults */ if (hw->mac.type >= e1000_pch2lan) { s32 ret_val; if (adapter->netdev->mtu > ETH_DATA_LEN) ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); else ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); if (ret_val) e_dbg("failed to enable|disable jumbo frame workaround mode\n"); } /* Program MC offset vector base */ rctl = er32(RCTL); rctl &= ~(3 << E1000_RCTL_MO_SHIFT); rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); /* Do not Store bad packets */ rctl &= ~E1000_RCTL_SBP; /* Enable Long Packet receive */ if (adapter->netdev->mtu <= ETH_DATA_LEN) rctl &= ~E1000_RCTL_LPE; else rctl |= E1000_RCTL_LPE; /* Some systems expect that the CRC is included in SMBUS traffic. The * hardware strips the CRC before sending to both SMBUS (BMC) and to * host memory when this is enabled */ if (adapter->flags2 & FLAG2_CRC_STRIPPING) rctl |= E1000_RCTL_SECRC; /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { u16 phy_data; e1e_rphy(hw, PHY_REG(770, 26), &phy_data); phy_data &= 0xfff8; phy_data |= BIT(2); e1e_wphy(hw, PHY_REG(770, 26), phy_data); e1e_rphy(hw, 22, &phy_data); phy_data &= 0x0fff; phy_data |= BIT(14); e1e_wphy(hw, 0x10, 0x2823); e1e_wphy(hw, 0x11, 0x0003); e1e_wphy(hw, 22, phy_data); } /* Setup buffer sizes */ rctl &= ~E1000_RCTL_SZ_4096; rctl |= E1000_RCTL_BSEX; switch (adapter->rx_buffer_len) { case 2048: default: rctl |= E1000_RCTL_SZ_2048; rctl &= ~E1000_RCTL_BSEX; break; case 4096: rctl |= E1000_RCTL_SZ_4096; break; case 8192: rctl |= E1000_RCTL_SZ_8192; break; case 16384: rctl |= E1000_RCTL_SZ_16384; break; } /* Enable Extended Status in all Receive Descriptors */ rfctl = er32(RFCTL); rfctl |= E1000_RFCTL_EXTEN; ew32(RFCTL, rfctl); /* 82571 and greater support packet-split where the protocol * header is placed in skb->data and the packet data is * placed in pages hanging off of skb_shinfo(skb)->nr_frags. * In the case of a non-split, skb->data is linearly filled, * followed by the page buffers. Therefore, skb->data is * sized to hold the largest protocol header. 
* * allocations using alloc_page take too long for regular MTU * so only enable packet split for jumbo frames * * Using pages when the page size is greater than 16k wastes * a lot of memory, since we allocate 3 pages at all times * per packet. */ pages = PAGE_USE_COUNT(adapter->netdev->mtu); if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) adapter->rx_ps_pages = pages; else adapter->rx_ps_pages = 0; if (adapter->rx_ps_pages) { u32 psrctl = 0; /* Enable Packet split descriptors */ rctl |= E1000_RCTL_DTYP_PS; psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; switch (adapter->rx_ps_pages) { case 3: psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT; fallthrough; case 2: psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT; fallthrough; case 1: psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT; break; } ew32(PSRCTL, psrctl); } /* This is useful for sniffing bad packets. */ if (adapter->netdev->features & NETIF_F_RXALL) { /* UPE and MPE will be handled by normal PROMISC logic * in e1000e_set_rx_mode */ rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ E1000_RCTL_BAM | /* RX All Bcast Pkts */ E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ E1000_RCTL_DPF | /* Allow filtered pause */ E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ /* Do not mess with E1000_CTRL_VME, it affects transmit as well, * and that breaks VLANs. */ } ew32(RCTL, rctl); /* just started the receive unit, no need to restart */ adapter->flags &= ~FLAG_RESTART_NOW; } /** * e1000_configure_rx - Configure Receive Unit after Reset * @adapter: board private structure * * Configure the Rx unit of the MAC after a reset. **/ static void e1000_configure_rx(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_ring *rx_ring = adapter->rx_ring; u64 rdba; u32 rdlen, rctl, rxcsum, ctrl_ext; if (adapter->rx_ps_pages) { /* this is a 32 byte descriptor */ rdlen = rx_ring->count * sizeof(union e1000_rx_desc_packet_split); adapter->clean_rx = e1000_clean_rx_irq_ps; adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); adapter->clean_rx = e1000_clean_jumbo_rx_irq; adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; } else { rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); adapter->clean_rx = e1000_clean_rx_irq; adapter->alloc_rx_buf = e1000_alloc_rx_buffers; } /* disable receives while setting up the descriptors */ rctl = er32(RCTL); if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) ew32(RCTL, rctl & ~E1000_RCTL_EN); e1e_flush(); usleep_range(10000, 11000); if (adapter->flags2 & FLAG2_DMA_BURST) { /* set the writeback threshold (only takes effect if the RDTR * is set). 
set GRAN=1 and write back up to 0x4 worth, and * enable prefetching of 0x20 Rx descriptors * granularity = 01 * wthresh = 04, * hthresh = 04, * pthresh = 0x20 */ ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); } /* set the Receive Delay Timer Register */ ew32(RDTR, adapter->rx_int_delay); /* irq moderation */ ew32(RADV, adapter->rx_abs_int_delay); if ((adapter->itr_setting != 0) && (adapter->itr != 0)) e1000e_write_itr(adapter, adapter->itr); ctrl_ext = er32(CTRL_EXT); /* Auto-Mask interrupts upon ICR access */ ctrl_ext |= E1000_CTRL_EXT_IAME; ew32(IAM, 0xffffffff); ew32(CTRL_EXT, ctrl_ext); e1e_flush(); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ rdba = rx_ring->dma; ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); ew32(RDBAH(0), (rdba >> 32)); ew32(RDLEN(0), rdlen); ew32(RDH(0), 0); ew32(RDT(0), 0); rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); writel(0, rx_ring->head); if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) e1000e_update_rdt_wa(rx_ring, 0); else writel(0, rx_ring->tail); /* Enable Receive Checksum Offload for TCP and UDP */ rxcsum = er32(RXCSUM); if (adapter->netdev->features & NETIF_F_RXCSUM) rxcsum |= E1000_RXCSUM_TUOFL; else rxcsum &= ~E1000_RXCSUM_TUOFL; ew32(RXCSUM, rxcsum); /* With jumbo frames, excessive C-state transition latencies result * in dropped transactions. */ if (adapter->netdev->mtu > ETH_DATA_LEN) { u32 lat = ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 - adapter->max_frame_size) * 8 / 1000; if (adapter->flags & FLAG_IS_ICH) { u32 rxdctl = er32(RXDCTL(0)); ew32(RXDCTL(0), rxdctl | 0x3 | BIT(8)); } dev_info(&adapter->pdev->dev, "Some CPU C-states have been disabled in order to enable jumbo frames\n"); cpu_latency_qos_update_request(&adapter->pm_qos_req, lat); } else { cpu_latency_qos_update_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE); } /* Enable Receives */ ew32(RCTL, rctl); } /** * e1000e_write_mc_addr_list - write multicast addresses to MTA * @netdev: network interface device structure * * Writes multicast address list to the MTA hash table. * Returns: -ENOMEM on failure * 0 on no addresses written * X on writing X addresses to MTA */ static int e1000e_write_mc_addr_list(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; u8 *mta_list; int i; if (netdev_mc_empty(netdev)) { /* nothing to program, so clear mc list */ hw->mac.ops.update_mc_addr_list(hw, NULL, 0); return 0; } mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC); if (!mta_list) return -ENOMEM; /* update_mc_addr_list expects a packed array of only addresses. */ i = 0; netdev_for_each_mc_addr(ha, netdev) memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); hw->mac.ops.update_mc_addr_list(hw, mta_list, i); kfree(mta_list); return netdev_mc_count(netdev); } /** * e1000e_write_uc_addr_list - write unicast addresses to RAR table * @netdev: network interface device structure * * Writes unicast address list to the RAR table. 
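 * One receive address register is always reserved for the adapter's own
 * MAC address, and a second one when the LAA workaround is active, so
 * slightly fewer than rar_get_count() entries are usable here.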
* Returns: -ENOMEM on failure/insufficient address space * 0 on no addresses written * X on writing X addresses to the RAR table **/ static int e1000e_write_uc_addr_list(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; unsigned int rar_entries; int count = 0; rar_entries = hw->mac.ops.rar_get_count(hw); /* save a rar entry for our hardware address */ rar_entries--; /* save a rar entry for the LAA workaround */ if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) rar_entries--; /* return ENOMEM indicating insufficient memory for addresses */ if (netdev_uc_count(netdev) > rar_entries) return -ENOMEM; if (!netdev_uc_empty(netdev) && rar_entries) { struct netdev_hw_addr *ha; /* write the addresses in reverse order to avoid write * combining */ netdev_for_each_uc_addr(ha, netdev) { int ret_val; if (!rar_entries) break; ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); if (ret_val < 0) return -ENOMEM; count++; } } /* zero out the remaining RAR entries not used above */ for (; rar_entries > 0; rar_entries--) { ew32(RAH(rar_entries), 0); ew32(RAL(rar_entries), 0); } e1e_flush(); return count; } /** * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure * * The ndo_set_rx_mode entry point is called whenever the unicast or multicast * address list or the network interface flags are updated. This routine is * responsible for configuring the hardware for proper unicast, multicast, * promiscuous mode, and all-multi behavior. **/ static void e1000e_set_rx_mode(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 rctl; if (pm_runtime_suspended(netdev->dev.parent)) return; /* Check for Promiscuous and All Multicast modes */ rctl = er32(RCTL); /* clear the affected bits */ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); if (netdev->flags & IFF_PROMISC) { rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); /* Do not hardware filter VLANs in promisc mode */ e1000e_vlan_filter_disable(adapter); } else { int count; if (netdev->flags & IFF_ALLMULTI) { rctl |= E1000_RCTL_MPE; } else { /* Write addresses to the MTA, if the attempt fails * then we should just turn on promiscuous mode so * that we can at least receive multicast traffic */ count = e1000e_write_mc_addr_list(netdev); if (count < 0) rctl |= E1000_RCTL_MPE; } e1000e_vlan_filter_enable(adapter); /* Write addresses to available RAR registers, if there is not * sufficient space to store all the addresses then enable * unicast promiscuous mode */ count = e1000e_write_uc_addr_list(netdev); if (count < 0) rctl |= E1000_RCTL_UPE; } ew32(RCTL, rctl); if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) e1000e_vlan_strip_enable(adapter); else e1000e_vlan_strip_disable(adapter); } static void e1000e_setup_rss_hash(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 mrqc, rxcsum; u32 rss_key[10]; int i; netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (i = 0; i < 10; i++) ew32(RSSRK(i), rss_key[i]); /* Direct all traffic to queue 0 */ for (i = 0; i < 32; i++) ew32(RETA(i), 0); /* Disable raw packet checksumming so that RSS hash is placed in * descriptor on writeback. 
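 * Note that with every RETA entry left at zero (above) all traffic still
 * lands on queue 0; the hash the hardware writes back is only useful as
 * a flow hash for the stack (NETIF_F_RXHASH), not for queue steering,
 * since this device exposes a single Rx queue.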
*/ rxcsum = er32(RXCSUM); rxcsum |= E1000_RXCSUM_PCSD; ew32(RXCSUM, rxcsum); mrqc = (E1000_MRQC_RSS_FIELD_IPV4 | E1000_MRQC_RSS_FIELD_IPV4_TCP | E1000_MRQC_RSS_FIELD_IPV6 | E1000_MRQC_RSS_FIELD_IPV6_TCP | E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); ew32(MRQC, mrqc); } /** * e1000e_get_base_timinca - get default SYSTIM time increment attributes * @adapter: board private structure * @timinca: pointer to returned time increment attributes * * Get attributes for incrementing the System Time Register SYSTIML/H at * the default base frequency, and set the cyclecounter shift value. **/ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) { struct e1000_hw *hw = &adapter->hw; u32 incvalue, incperiod, shift; /* Make sure clock is enabled on I217/I218/I219 before checking * the frequency */ if ((hw->mac.type >= e1000_pch_lpt) && !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { u32 fextnvm7 = er32(FEXTNVM7); if (!(fextnvm7 & BIT(0))) { ew32(FEXTNVM7, fextnvm7 | BIT(0)); e1e_flush(); } } switch (hw->mac.type) { case e1000_pch2lan: /* Stable 96MHz frequency */ incperiod = INCPERIOD_96MHZ; incvalue = INCVALUE_96MHZ; shift = INCVALUE_SHIFT_96MHZ; adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; break; case e1000_pch_lpt: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 96MHz frequency */ incperiod = INCPERIOD_96MHZ; incvalue = INCVALUE_96MHZ; shift = INCVALUE_SHIFT_96MHZ; adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; } else { /* Stable 25MHz frequency */ incperiod = INCPERIOD_25MHZ; incvalue = INCVALUE_25MHZ; shift = INCVALUE_SHIFT_25MHZ; adapter->cc.shift = shift; } break; case e1000_pch_spt: /* Stable 24MHz frequency */ incperiod = INCPERIOD_24MHZ; incvalue = INCVALUE_24MHZ; shift = INCVALUE_SHIFT_24MHZ; adapter->cc.shift = shift; break; case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 24MHz frequency */ incperiod = INCPERIOD_24MHZ; incvalue = INCVALUE_24MHZ; shift = INCVALUE_SHIFT_24MHZ; adapter->cc.shift = shift; } else { /* Stable 38400KHz frequency */ incperiod = INCPERIOD_38400KHZ; incvalue = INCVALUE_38400KHZ; shift = INCVALUE_SHIFT_38400KHZ; adapter->cc.shift = shift; } break; case e1000_82574: case e1000_82583: /* Stable 25MHz frequency */ incperiod = INCPERIOD_25MHZ; incvalue = INCVALUE_25MHZ; shift = INCVALUE_SHIFT_25MHZ; adapter->cc.shift = shift; break; default: return -EINVAL; } *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) | ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK)); return 0; } /** * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable * @adapter: board private structure * @config: timestamp configuration * * Outgoing time stamping can be enabled and disabled. Play nice and * disable it when requested, although it shouldn't cause any overhead * when no packet needs it. At most one packet in the queue may be * marked for time stamping, otherwise it would be impossible to tell * for sure to which packet the hardware time stamp belongs. * * Incoming time stamping has to be configured via the hardware filters. * Not all combinations are supported, in particular event type has to be * specified. Matching the kind of event packet is not supported, with the * exception of "all V2 events regardless of level 2 or 4". 
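 *
 * For context, a minimal user-space sketch (not part of this driver; the
 * interface name "eth0" is only an example) of how such a request
 * typically reaches this function through the SIOCSHWTSTAMP ioctl:
 *
 *   #include <linux/net_tstamp.h>
 *   #include <linux/sockios.h>
 *   #include <net/if.h>
 *   #include <string.h>
 *   #include <sys/ioctl.h>
 *   #include <sys/socket.h>
 *
 *   struct hwtstamp_config cfg = {
 *       .tx_type   = HWTSTAMP_TX_ON,
 *       .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *   };
 *   struct ifreq ifr = { 0 };
 *   int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(fd, SIOCSHWTSTAMP, &ifr);  // ends up in the hwtstamp_set path
 *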
**/ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, struct hwtstamp_config *config) { struct e1000_hw *hw = &adapter->hw; u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; u32 rxmtrl = 0; u16 rxudp = 0; bool is_l4 = false; bool is_l2 = false; u32 regval; if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) return -EINVAL; switch (config->tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; break; case HWTSTAMP_TX_ON: break; default: return -ERANGE; } switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: tsync_rx_ctl = 0; break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE; is_l4 = true; break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE; is_l4 = true; break; case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: /* Also time stamps V2 L2 Path Delay Request/Response */ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; is_l2 = true; break; case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: /* Also time stamps V2 L2 Path Delay Request/Response. */ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; is_l2 = true; break; case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: /* Hardware cannot filter just V2 L4 Sync messages */ fallthrough; case HWTSTAMP_FILTER_PTP_V2_SYNC: /* Also time stamps V2 Path Delay Request/Response. */ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; is_l2 = true; is_l4 = true; break; case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: /* Hardware cannot filter just V2 L4 Delay Request messages */ fallthrough; case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: /* Also time stamps V2 Path Delay Request/Response. */ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; is_l2 = true; is_l4 = true; break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: /* Hardware cannot filter just V2 L4 or L2 Event messages */ fallthrough; case HWTSTAMP_FILTER_PTP_V2_EVENT: tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; is_l2 = true; is_l4 = true; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: /* For V1, the hardware can only filter Sync messages or * Delay Request messages but not both so fall-through to * time stamp all packets. 
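 * The widened filter is written back into config->rx_filter so that,
 * per the SIOCSHWTSTAMP convention, the caller can report to user space
 * which mode was actually applied rather than the one requested.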
*/ fallthrough; case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_ALL: is_l2 = true; is_l4 = true; tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; config->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } adapter->hwtstamp_config = *config; /* enable/disable Tx h/w time stamping */ regval = er32(TSYNCTXCTL); regval &= ~E1000_TSYNCTXCTL_ENABLED; regval |= tsync_tx_ctl; ew32(TSYNCTXCTL, regval); if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) != (regval & E1000_TSYNCTXCTL_ENABLED)) { e_err("Timesync Tx Control register not set as expected\n"); return -EAGAIN; } /* enable/disable Rx h/w time stamping */ regval = er32(TSYNCRXCTL); regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); regval |= tsync_rx_ctl; ew32(TSYNCRXCTL, regval); if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK)) != (regval & (E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK))) { e_err("Timesync Rx Control register not set as expected\n"); return -EAGAIN; } /* L2: define ethertype filter for time stamped packets */ if (is_l2) rxmtrl |= ETH_P_1588; /* define which PTP packets get time stamped */ ew32(RXMTRL, rxmtrl); /* Filter by destination port */ if (is_l4) { rxudp = PTP_EV_PORT; cpu_to_be16s(&rxudp); } ew32(RXUDP, rxudp); e1e_flush(); /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */ er32(RXSTMPH); er32(TXSTMPH); return 0; } /** * e1000_configure - configure the hardware for Rx and Tx * @adapter: private board structure **/ static void e1000_configure(struct e1000_adapter *adapter) { struct e1000_ring *rx_ring = adapter->rx_ring; e1000e_set_rx_mode(adapter->netdev); e1000_restore_vlan(adapter); e1000_init_manageability_pt(adapter); e1000_configure_tx(adapter); if (adapter->netdev->features & NETIF_F_RXHASH) e1000e_setup_rss_hash(adapter); e1000_setup_rctl(adapter); e1000_configure_rx(adapter); adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); } /** * e1000e_power_up_phy - restore link in case the phy was powered down * @adapter: address of board private structure * * The phy may be powered down to save power and turn off link when the * driver is unloaded and wake on lan is not enabled (among others) * *** this routine MUST be followed by a call to e1000e_reset *** **/ void e1000e_power_up_phy(struct e1000_adapter *adapter) { if (adapter->hw.phy.ops.power_up) adapter->hw.phy.ops.power_up(&adapter->hw); adapter->hw.mac.ops.setup_link(&adapter->hw); } /** * e1000_power_down_phy - Power down the PHY * @adapter: board private structure * * Power down the PHY so no link is implied when interface is down. * The PHY cannot be powered down if management or WoL is active. */ static void e1000_power_down_phy(struct e1000_adapter *adapter) { if (adapter->hw.phy.ops.power_down) adapter->hw.phy.ops.power_down(&adapter->hw); } /** * e1000_flush_tx_ring - remove all descriptors from the tx_ring * @adapter: board private structure * * We want to clear all pending descriptors from the TX ring. * zeroing happens when the HW reads the regs. We assign the ring itself as * the data of the next descriptor. We don't care about the data we are about * to reset the HW. 
*/ static void e1000_flush_tx_ring(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_ring *tx_ring = adapter->tx_ring; struct e1000_tx_desc *tx_desc = NULL; u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS; u16 size = 512; tctl = er32(TCTL); ew32(TCTL, tctl | E1000_TCTL_EN); tdt = er32(TDT(0)); BUG_ON(tdt != tx_ring->next_to_use); tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use); tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma); tx_desc->lower.data = cpu_to_le32(txd_lower | size); tx_desc->upper.data = 0; /* flush descriptors to memory before notifying the HW */ wmb(); tx_ring->next_to_use++; if (tx_ring->next_to_use == tx_ring->count) tx_ring->next_to_use = 0; ew32(TDT(0), tx_ring->next_to_use); usleep_range(200, 250); } /** * e1000_flush_rx_ring - remove all descriptors from the rx_ring * @adapter: board private structure * * Mark all descriptors in the RX ring as consumed and disable the rx ring */ static void e1000_flush_rx_ring(struct e1000_adapter *adapter) { u32 rctl, rxdctl; struct e1000_hw *hw = &adapter->hw; rctl = er32(RCTL); ew32(RCTL, rctl & ~E1000_RCTL_EN); e1e_flush(); usleep_range(100, 150); rxdctl = er32(RXDCTL(0)); /* zero the lower 14 bits (prefetch and host thresholds) */ rxdctl &= 0xffffc000; /* update thresholds: prefetch threshold to 31, host threshold to 1 * and make sure the granularity is "descriptors" and not "cache lines" */ rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC); ew32(RXDCTL(0), rxdctl); /* momentarily enable the RX ring for the changes to take effect */ ew32(RCTL, rctl | E1000_RCTL_EN); e1e_flush(); usleep_range(100, 150); ew32(RCTL, rctl & ~E1000_RCTL_EN); } /** * e1000_flush_desc_rings - remove all descriptors from the descriptor rings * @adapter: board private structure * * In i219, the descriptor rings must be emptied before resetting the HW * or before changing the device state to D3 during runtime (runtime PM). * * Failure to do this will cause the HW to enter a unit hang state which can * only be released by PCI reset on the device * */ static void e1000_flush_desc_rings(struct e1000_adapter *adapter) { u16 hang_state; u32 fext_nvm11, tdlen; struct e1000_hw *hw = &adapter->hw; /* First, disable MULR fix in FEXTNVM11 */ fext_nvm11 = er32(FEXTNVM11); fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; ew32(FEXTNVM11, fext_nvm11); /* do nothing if we're not in faulty state, or if the queue is empty */ tdlen = er32(TDLEN(0)); pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, &hang_state); if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen) return; e1000_flush_tx_ring(adapter); /* recheck, maybe the fault is caused by the rx ring */ pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, &hang_state); if (hang_state & FLUSH_DESC_REQUIRED) e1000_flush_rx_ring(adapter); } /** * e1000e_systim_reset - reset the timesync registers after a hardware reset * @adapter: board private structure * * When the MAC is reset, all hardware bits for timesync will be reset to the * default values. This function will restore the settings last in place. * Since the clock SYSTIME registers are reset, we will simply restore the * cyclecounter to the kernel real clock time. 
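 * In other words only the frequency adjustment survives the reset: the
 * counter is re-seeded from ktime_get_real() below, so any absolute
 * offset previously dialled into the PHC (e.g. by phc2sys or ptp4l) has
 * to be steered in again by user space.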
**/ static void e1000e_systim_reset(struct e1000_adapter *adapter) { struct ptp_clock_info *info = &adapter->ptp_clock_info; struct e1000_hw *hw = &adapter->hw; unsigned long flags; u32 timinca; s32 ret_val; if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) return; if (info->adjfine) { /* restore the previous ptp frequency delta */ ret_val = info->adjfine(info, adapter->ptp_delta); } else { /* set the default base frequency if no adjustment possible */ ret_val = e1000e_get_base_timinca(adapter, &timinca); if (!ret_val) ew32(TIMINCA, timinca); } if (ret_val) { dev_warn(&adapter->pdev->dev, "Failed to restore TIMINCA clock rate delta: %d\n", ret_val); return; } /* reset the systim ns time counter */ spin_lock_irqsave(&adapter->systim_lock, flags); timecounter_init(&adapter->tc, &adapter->cc, ktime_to_ns(ktime_get_real())); spin_unlock_irqrestore(&adapter->systim_lock, flags); /* restore the previous hwtstamp configuration settings */ e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); } /** * e1000e_reset - bring the hardware into a known good state * @adapter: board private structure * * This function boots the hardware and enables some settings that * require a configuration cycle of the hardware - those cannot be * set/changed during runtime. After reset the device needs to be * properly configured for Rx, Tx etc. */ void e1000e_reset(struct e1000_adapter *adapter) { struct e1000_mac_info *mac = &adapter->hw.mac; struct e1000_fc_info *fc = &adapter->hw.fc; struct e1000_hw *hw = &adapter->hw; u32 tx_space, min_tx_space, min_rx_space; u32 pba = adapter->pba; u16 hwm; /* reset Packet Buffer Allocation to default */ ew32(PBA, pba); if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) { /* To maintain wire speed transmits, the Tx FIFO should be * large enough to accommodate two full transmit packets, * rounded up to the next 1KB and expressed in KB. Likewise, * the Rx FIFO should be large enough to accommodate at least * one full receive packet and is similarly rounded up and * expressed in KB. */ pba = er32(PBA); /* upper 16 bits has Tx packet buffer allocation size in KB */ tx_space = pba >> 16; /* lower 16 bits has Rx packet buffer allocation size in KB */ pba &= 0xffff; /* the Tx fifo also stores 16 bytes of information about the Tx * but don't include ethernet FCS because hardware appends it */ min_tx_space = (adapter->max_frame_size + sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2; min_tx_space = ALIGN(min_tx_space, 1024); min_tx_space >>= 10; /* software strips receive CRC, so leave room for it */ min_rx_space = adapter->max_frame_size; min_rx_space = ALIGN(min_rx_space, 1024); min_rx_space >>= 10; /* If current Tx allocation is less than the min Tx FIFO size, * and the min Tx FIFO size is less than the current Rx FIFO * allocation, take space away from current Rx allocation */ if ((tx_space < min_tx_space) && ((min_tx_space - tx_space) < pba)) { pba -= min_tx_space - tx_space; /* if short on Rx space, Rx wins and must trump Tx * adjustment */ if (pba < min_rx_space) pba = min_rx_space; } ew32(PBA, pba); } /* flow control settings * * The high water mark must be low enough to fit one full frame * (or the size used for early receive) above it in the Rx FIFO. 
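 * For illustration (hypothetical numbers): with a 20 KB Rx packet buffer
 * (pba = 20) and a 1522-byte max frame, the default branch below gives
 * hwm = min(20480 * 9 / 10, 20480 - 1522) = min(18432, 18958) = 18432,
 * with low_water sitting 8 bytes under high_water.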
* Set it to the lower of: * - 90% of the Rx FIFO size, and * - the full Rx FIFO size minus one full frame */ if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) fc->pause_time = 0xFFFF; else fc->pause_time = E1000_FC_PAUSE_TIME; fc->send_xon = true; fc->current_mode = fc->requested_mode; switch (hw->mac.type) { case e1000_ich9lan: case e1000_ich10lan: if (adapter->netdev->mtu > ETH_DATA_LEN) { pba = 14; ew32(PBA, pba); fc->high_water = 0x2800; fc->low_water = fc->high_water - 8; break; } fallthrough; default: hwm = min(((pba << 10) * 9 / 10), ((pba << 10) - adapter->max_frame_size)); fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ fc->low_water = fc->high_water - 8; break; case e1000_pchlan: /* Workaround PCH LOM adapter hangs with certain network * loads. If hangs persist, try disabling Tx flow control. */ if (adapter->netdev->mtu > ETH_DATA_LEN) { fc->high_water = 0x3500; fc->low_water = 0x1500; } else { fc->high_water = 0x5000; fc->low_water = 0x3000; } fc->refresh_time = 0x1000; break; case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: fc->refresh_time = 0xFFFF; fc->pause_time = 0xFFFF; if (adapter->netdev->mtu <= ETH_DATA_LEN) { fc->high_water = 0x05C20; fc->low_water = 0x05048; break; } pba = 14; ew32(PBA, pba); fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; break; } /* Alignment of Tx data is on an arbitrary byte boundary with the * maximum size per Tx descriptor limited only to the transmit * allocation of the packet buffer minus 96 bytes with an upper * limit of 24KB due to receive synchronization limitations. */ adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, 24 << 10); /* Disable Adaptive Interrupt Moderation if 2 full packets cannot * fit in receive buffer. 
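 * (hypothetical example: with a 9022-byte max frame (9000-byte MTU) and
 * the packet buffer rebalanced down to 14 KB, 2 * 9022 = 18044 is larger
 * than 14 * 1024 = 14336, so AIM is switched off and ITR is written as
 * 0; dropping back to a 1500-byte MTU makes the test pass again and AIM
 * returns with ITR = 20000)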
*/ if (adapter->itr_setting & 0x3) { if ((adapter->max_frame_size * 2) > (pba << 10)) { if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { dev_info(&adapter->pdev->dev, "Interrupt Throttle Rate off\n"); adapter->flags2 |= FLAG2_DISABLE_AIM; e1000e_write_itr(adapter, 0); } } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { dev_info(&adapter->pdev->dev, "Interrupt Throttle Rate on\n"); adapter->flags2 &= ~FLAG2_DISABLE_AIM; adapter->itr = 20000; e1000e_write_itr(adapter, adapter->itr); } } if (hw->mac.type >= e1000_pch_spt) e1000_flush_desc_rings(adapter); /* Allow time for pending master requests to run */ mac->ops.reset_hw(hw); /* For parts with AMT enabled, let the firmware know * that the network interface is in control */ if (adapter->flags & FLAG_HAS_AMT) e1000e_get_hw_control(adapter); ew32(WUC, 0); if (mac->ops.init_hw(hw)) e_err("Hardware Error\n"); e1000_update_mng_vlan(adapter); /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ ew32(VET, ETH_P_8021Q); e1000e_reset_adaptive(hw); /* restore systim and hwtstamp settings */ e1000e_systim_reset(adapter); /* Set EEE advertisement as appropriate */ if (adapter->flags2 & FLAG2_HAS_EEE) { s32 ret_val; u16 adv_addr; switch (hw->phy.type) { case e1000_phy_82579: adv_addr = I82579_EEE_ADVERTISEMENT; break; case e1000_phy_i217: adv_addr = I217_EEE_ADVERTISEMENT; break; default: dev_err(&adapter->pdev->dev, "Invalid PHY type setting EEE advertisement\n"); return; } ret_val = hw->phy.ops.acquire(hw); if (ret_val) { dev_err(&adapter->pdev->dev, "EEE advertisement - unable to acquire PHY\n"); return; } e1000_write_emi_reg_locked(hw, adv_addr, hw->dev_spec.ich8lan.eee_disable ? 0 : adapter->eee_advert); hw->phy.ops.release(hw); } if (!netif_running(adapter->netdev) && !test_bit(__E1000_TESTING, &adapter->state)) e1000_power_down_phy(adapter); e1000_get_phy_info(hw); if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && !(adapter->flags & FLAG_SMART_POWER_DOWN)) { u16 phy_data = 0; /* speed up time to link by disabling smart power down, ignore * the return value of this function because there is nothing * different we would do if it failed */ e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); phy_data &= ~IGP02E1000_PM_SPD; e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); } if (hw->mac.type >= e1000_pch_spt && adapter->int_mode == 0) { u32 reg; /* Fextnvm7 @ 0xe4[2] = 1 */ reg = er32(FEXTNVM7); reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE; ew32(FEXTNVM7, reg); /* Fextnvm9 @ 0x5bb4[13:12] = 11 */ reg = er32(FEXTNVM9); reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS | E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS; ew32(FEXTNVM9, reg); } } /** * e1000e_trigger_lsc - trigger an LSC interrupt * @adapter: board private structure * * Fire a link status change interrupt to start the watchdog. 
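 * In MSI-X mode the LSC cause is serviced by the "other" vector, so
 * E1000_ICS_OTHER is raised together with E1000_ICS_LSC below; in
 * MSI/legacy mode a bare LSC is sufficient.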
**/ static void e1000e_trigger_lsc(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; if (adapter->msix_entries) ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER); else ew32(ICS, E1000_ICS_LSC); } void e1000e_up(struct e1000_adapter *adapter) { /* hardware has been reset, we need to reload some things */ e1000_configure(adapter); clear_bit(__E1000_DOWN, &adapter->state); if (adapter->msix_entries) e1000_configure_msix(adapter); e1000_irq_enable(adapter); /* Tx queue started by watchdog timer when link is up */ e1000e_trigger_lsc(adapter); } static void e1000e_flush_descriptors(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; if (!(adapter->flags2 & FLAG2_DMA_BURST)) return; /* flush pending descriptor writebacks to memory */ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); /* execute the writes immediately */ e1e_flush(); /* due to rare timing issues, write to TIDV/RDTR again to ensure the * write is successful */ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); /* execute the writes immediately */ e1e_flush(); } static void e1000e_update_stats(struct e1000_adapter *adapter); /** * e1000e_down - quiesce the device and optionally reset the hardware * @adapter: board private structure * @reset: boolean flag to reset the hardware or not */ void e1000e_down(struct e1000_adapter *adapter, bool reset) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; u32 tctl, rctl; /* signal that we're down so the interrupt handler does not * reschedule our watchdog timer */ set_bit(__E1000_DOWN, &adapter->state); netif_carrier_off(netdev); /* disable receives in the hardware */ rctl = er32(RCTL); if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) ew32(RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ netif_stop_queue(netdev); /* disable transmits in the hardware */ tctl = er32(TCTL); tctl &= ~E1000_TCTL_EN; ew32(TCTL, tctl); /* flush both disables and wait for them to finish */ e1e_flush(); usleep_range(10000, 11000); e1000_irq_disable(adapter); napi_synchronize(&adapter->napi); del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); spin_lock(&adapter->stats64_lock); e1000e_update_stats(adapter); spin_unlock(&adapter->stats64_lock); e1000e_flush_descriptors(adapter); adapter->link_speed = 0; adapter->link_duplex = 0; /* Disable Si errata workaround on PCHx for jumbo frame flow */ if ((hw->mac.type >= e1000_pch2lan) && (adapter->netdev->mtu > ETH_DATA_LEN) && e1000_lv_jumbo_workaround_ich8lan(hw, false)) e_dbg("failed to disable jumbo frame workaround mode\n"); if (!pci_channel_offline(adapter->pdev)) { if (reset) e1000e_reset(adapter); else if (hw->mac.type >= e1000_pch_spt) e1000_flush_desc_rings(adapter); } e1000_clean_tx_ring(adapter->tx_ring); e1000_clean_rx_ring(adapter->rx_ring); } void e1000e_reinit_locked(struct e1000_adapter *adapter) { might_sleep(); while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) usleep_range(1000, 1100); e1000e_down(adapter, true); e1000e_up(adapter); clear_bit(__E1000_RESETTING, &adapter->state); } /** * e1000e_sanitize_systim - sanitize raw cycle counter reads * @hw: pointer to the HW structure * @systim: PHC time value read, sanitized and returned * @sts: structure to hold system time before and after reading SYSTIML, * may be NULL * * Errata for 82574/82583 possible bad bits read from SYSTIMH/L: * check to see that the time is incrementing at a reasonable * rate 
and is a multiple of incvalue. **/ static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim, struct ptp_system_timestamp *sts) { u64 time_delta, rem, temp; u64 systim_next; u32 incvalue; int i; incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { /* latch SYSTIMH on read of SYSTIML */ ptp_read_system_prets(sts); systim_next = (u64)er32(SYSTIML); ptp_read_system_postts(sts); systim_next |= (u64)er32(SYSTIMH) << 32; time_delta = systim_next - systim; temp = time_delta; /* VMWare users have seen incvalue of zero, don't div / 0 */ rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0); systim = systim_next; if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0)) break; } return systim; } /** * e1000e_read_systim - read SYSTIM register * @adapter: board private structure * @sts: structure which will contain system time before and after reading * SYSTIML, may be NULL **/ u64 e1000e_read_systim(struct e1000_adapter *adapter, struct ptp_system_timestamp *sts) { struct e1000_hw *hw = &adapter->hw; u32 systimel, systimel_2, systimeh; u64 systim; /* SYSTIMH latching upon SYSTIML read does not work well. * This means that if SYSTIML overflows after we read it but before * we read SYSTIMH, the value of SYSTIMH has been incremented and we * will experience a huge non linear increment in the systime value * to fix that we test for overflow and if true, we re-read systime. */ ptp_read_system_prets(sts); systimel = er32(SYSTIML); ptp_read_system_postts(sts); systimeh = er32(SYSTIMH); /* Is systimel is so large that overflow is possible? */ if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) { ptp_read_system_prets(sts); systimel_2 = er32(SYSTIML); ptp_read_system_postts(sts); if (systimel > systimel_2) { /* There was an overflow, read again SYSTIMH, and use * systimel_2 */ systimeh = er32(SYSTIMH); systimel = systimel_2; } } systim = (u64)systimel; systim |= (u64)systimeh << 32; if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) systim = e1000e_sanitize_systim(hw, systim, sts); return systim; } /** * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) * @cc: cyclecounter structure **/ static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc) { struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, cc); return e1000e_read_systim(adapter, NULL); } /** * e1000_sw_init - Initialize general software structures (struct e1000_adapter) * @adapter: board private structure to initialize * * e1000_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). 
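 *
 * For the default 1500-byte MTU this works out to
 * max_frame_size = 1500 + VLAN_ETH_HLEN (18) + ETH_FCS_LEN (4) = 1522
 * and min_frame_size = ETH_ZLEN (60) + ETH_FCS_LEN (4) = 64 bytes.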
**/ static int e1000_sw_init(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; adapter->rx_ps_bsize0 = 128; adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; adapter->tx_ring_count = E1000_DEFAULT_TXD; adapter->rx_ring_count = E1000_DEFAULT_RXD; spin_lock_init(&adapter->stats64_lock); e1000e_set_interrupt_capability(adapter); if (e1000_alloc_queues(adapter)) return -ENOMEM; /* Setup hardware time stamping cyclecounter */ if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { adapter->cc.read = e1000e_cyclecounter_read; adapter->cc.mask = CYCLECOUNTER_MASK(64); adapter->cc.mult = 1; /* cc.shift set in e1000e_get_base_tininca() */ spin_lock_init(&adapter->systim_lock); INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work); } /* Explicitly disable IRQ since the NIC can be in any state. */ e1000_irq_disable(adapter); set_bit(__E1000_DOWN, &adapter->state); return 0; } /** * e1000_intr_msi_test - Interrupt Handler * @irq: interrupt number * @data: pointer to a network interface device structure **/ static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 icr = er32(ICR); e_dbg("icr is %08X\n", icr); if (icr & E1000_ICR_RXSEQ) { adapter->flags &= ~FLAG_MSI_TEST_FAILED; /* Force memory writes to complete before acknowledging the * interrupt is handled. */ wmb(); } return IRQ_HANDLED; } /** * e1000_test_msi_interrupt - Returns 0 for successful test * @adapter: board private struct * * code flow taken from tg3.c **/ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; int err; /* poll_enable hasn't been called yet, so don't need disable */ /* clear any pending events */ er32(ICR); /* free the real vector and request a test handler */ e1000_free_irq(adapter); e1000e_reset_interrupt_capability(adapter); /* Assume that the test fails, if it succeeds then the test * MSI irq handler will unset this flag */ adapter->flags |= FLAG_MSI_TEST_FAILED; err = pci_enable_msi(adapter->pdev); if (err) goto msi_test_failed; err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, netdev->name, netdev); if (err) { pci_disable_msi(adapter->pdev); goto msi_test_failed; } /* Force memory writes to complete before enabling and firing an * interrupt. */ wmb(); e1000_irq_enable(adapter); /* fire an unusual interrupt on the test handler */ ew32(ICS, E1000_ICS_RXSEQ); e1e_flush(); msleep(100); e1000_irq_disable(adapter); rmb(); /* read flags after interrupt has been fired */ if (adapter->flags & FLAG_MSI_TEST_FAILED) { adapter->int_mode = E1000E_INT_MODE_LEGACY; e_info("MSI interrupt test failed, using legacy interrupt.\n"); } else { e_dbg("MSI interrupt test succeeded!\n"); } free_irq(adapter->pdev->irq, netdev); pci_disable_msi(adapter->pdev); msi_test_failed: e1000e_set_interrupt_capability(adapter); return e1000_request_irq(adapter); } /** * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored * @adapter: board private struct * * code flow taken from tg3.c, called with e1000 interrupts disabled. 
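 * SERR is masked in PCI_COMMAND around the test because a failed MSI
 * write could otherwise be escalated to a system error on some chipsets;
 * the original setting is restored once the probe interrupt has either
 * fired or timed out.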
**/ static int e1000_test_msi(struct e1000_adapter *adapter) { int err; u16 pci_cmd; if (!(adapter->flags & FLAG_MSI_ENABLED)) return 0; /* disable SERR in case the MSI write causes a master abort */ pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); if (pci_cmd & PCI_COMMAND_SERR) pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd & ~PCI_COMMAND_SERR); err = e1000_test_msi_interrupt(adapter); /* re-enable SERR */ if (pci_cmd & PCI_COMMAND_SERR) { pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); pci_cmd |= PCI_COMMAND_SERR; pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); } return err; } /** * e1000e_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. **/ int e1000e_open(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; int err; /* disallow open during test */ if (test_bit(__E1000_TESTING, &adapter->state)) return -EBUSY; pm_runtime_get_sync(&pdev->dev); netif_carrier_off(netdev); netif_stop_queue(netdev); /* allocate transmit descriptors */ err = e1000e_setup_tx_resources(adapter->tx_ring); if (err) goto err_setup_tx; /* allocate receive descriptors */ err = e1000e_setup_rx_resources(adapter->rx_ring); if (err) goto err_setup_rx; /* If AMT is enabled, let the firmware know that the network * interface is now open and reset the part to a known state. */ if (adapter->flags & FLAG_HAS_AMT) { e1000e_get_hw_control(adapter); e1000e_reset(adapter); } e1000e_power_up_phy(adapter); adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) e1000_update_mng_vlan(adapter); /* DMA latency requirement to workaround jumbo issue */ cpu_latency_qos_add_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE); /* before we allocate an interrupt, we must be ready to handle it. * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt * as soon as we call pci_request_irq, so we have to setup our * clean_rx handler before we do so. 
*/ e1000_configure(adapter); err = e1000_request_irq(adapter); if (err) goto err_req_irq; /* Work around PCIe errata with MSI interrupts causing some chipsets to * ignore e1000e MSI messages, which means we need to test our MSI * interrupt now */ if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { err = e1000_test_msi(adapter); if (err) { e_err("Interrupt allocation failed\n"); goto err_req_irq; } } /* From here on the code is the same as e1000e_up() */ clear_bit(__E1000_DOWN, &adapter->state); napi_enable(&adapter->napi); e1000_irq_enable(adapter); adapter->tx_hang_recheck = false; hw->mac.get_link_status = true; pm_runtime_put(&pdev->dev); e1000e_trigger_lsc(adapter); return 0; err_req_irq: cpu_latency_qos_remove_request(&adapter->pm_qos_req); e1000e_release_hw_control(adapter); e1000_power_down_phy(adapter); e1000e_free_rx_resources(adapter->rx_ring); err_setup_rx: e1000e_free_tx_resources(adapter->tx_ring); err_setup_tx: e1000e_reset(adapter); pm_runtime_put_sync(&pdev->dev); return err; } /** * e1000e_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the drivers control, but * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. **/ int e1000e_close(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = adapter->pdev; int count = E1000_CHECK_RESET_COUNT; while (test_bit(__E1000_RESETTING, &adapter->state) && count--) usleep_range(10000, 11000); WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); pm_runtime_get_sync(&pdev->dev); if (netif_device_present(netdev)) { e1000e_down(adapter, true); e1000_free_irq(adapter); /* Link status message must follow this format */ netdev_info(netdev, "NIC Link is Down\n"); } napi_disable(&adapter->napi); e1000e_free_tx_resources(adapter->tx_ring); e1000e_free_rx_resources(adapter->rx_ring); /* kill manageability vlan ID if supported, but not if a vlan with * the same ID is registered on the host OS (let 8021q kill it) */ if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), adapter->mng_vlan_id); /* If AMT is enabled, let the firmware know that the network * interface is now closed */ if ((adapter->flags & FLAG_HAS_AMT) && !test_bit(__E1000_TESTING, &adapter->state)) e1000e_release_hw_control(adapter); cpu_latency_qos_remove_request(&adapter->pm_qos_req); pm_runtime_put_sync(&pdev->dev); return 0; } /** * e1000_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure **/ static int e1000_set_mac(struct net_device *netdev, void *p) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(netdev, addr->sa_data); memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { /* activate the work around */ e1000e_set_laa_state_82571(&adapter->hw, 1); /* Hold a copy of the LAA in RAR[14] This is done so that * between the time RAR[0] gets clobbered and the time it * gets fixed (in e1000_watchdog), the actual LAA is in 
one * of the RARs and no incoming packets directed to this port * are dropped. Eventually the LAA will be in RAR[0] and * RAR[14] */ hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, adapter->hw.mac.rar_entry_count - 1); } return 0; } /** * e1000e_update_phy_task - work thread to update phy * @work: pointer to our work struct * * this worker thread exists because we must acquire a * semaphore to read the phy, which we could msleep while * waiting for it, and we can't msleep in a timer. **/ static void e1000e_update_phy_task(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, update_phy_task); struct e1000_hw *hw = &adapter->hw; if (test_bit(__E1000_DOWN, &adapter->state)) return; e1000_get_phy_info(hw); /* Enable EEE on 82579 after link up */ if (hw->phy.type >= e1000_phy_82579) e1000_set_eee_pchlan(hw); } /** * e1000_update_phy_info - timre call-back to update PHY info * @t: pointer to timer_list containing private info adapter * * Need to wait a few seconds after link up to get diagnostic information from * the phy **/ static void e1000_update_phy_info(struct timer_list *t) { struct e1000_adapter *adapter = from_timer(adapter, t, phy_info_timer); if (test_bit(__E1000_DOWN, &adapter->state)) return; schedule_work(&adapter->update_phy_task); } /** * e1000e_update_phy_stats - Update the PHY statistics counters * @adapter: board private structure * * Read/clear the upper 16-bit PHY registers and read/accumulate lower **/ static void e1000e_update_phy_stats(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; s32 ret_val; u16 phy_data; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return; /* A page set is expensive so check if already on desired page. * If not, set to the page with the PHY status registers. 
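 *
 * Each statistic on that page is kept as an upper/lower register pair:
 * the *_UPPER half is read (and thereby cleared) first and only the
 * following *_LOWER read is added to the software counters, which is why
 * every upper read below deliberately discards phy_data.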
*/ hw->phy.addr = 1; ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, &phy_data); if (ret_val) goto release; if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) { ret_val = hw->phy.ops.set_page(hw, HV_STATS_PAGE << IGP_PAGE_SHIFT); if (ret_val) goto release; } /* Single Collision Count */ hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); if (!ret_val) adapter->stats.scc += phy_data; /* Excessive Collision Count */ hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); if (!ret_val) adapter->stats.ecol += phy_data; /* Multiple Collision Count */ hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); if (!ret_val) adapter->stats.mcc += phy_data; /* Late Collision Count */ hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); if (!ret_val) adapter->stats.latecol += phy_data; /* Collision Count - also used for adaptive IFS */ hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); if (!ret_val) hw->mac.collision_delta = phy_data; /* Defer Count */ hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); if (!ret_val) adapter->stats.dc += phy_data; /* Transmit with no CRS */ hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); if (!ret_val) adapter->stats.tncrs += phy_data; release: hw->phy.ops.release(hw); } /** * e1000e_update_stats - Update the board statistics counters * @adapter: board private structure **/ static void e1000e_update_stats(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; /* Prevent stats update while adapter is being reset, or if the pci * connection is down. 
*/ if (adapter->link_speed == 0) return; if (pci_channel_offline(pdev)) return; adapter->stats.crcerrs += er32(CRCERRS); adapter->stats.gprc += er32(GPRC); adapter->stats.gorc += er32(GORCL); er32(GORCH); /* Clear gorc */ adapter->stats.bprc += er32(BPRC); adapter->stats.mprc += er32(MPRC); adapter->stats.roc += er32(ROC); adapter->stats.mpc += er32(MPC); /* Half-duplex statistics */ if (adapter->link_duplex == HALF_DUPLEX) { if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { e1000e_update_phy_stats(adapter); } else { adapter->stats.scc += er32(SCC); adapter->stats.ecol += er32(ECOL); adapter->stats.mcc += er32(MCC); adapter->stats.latecol += er32(LATECOL); adapter->stats.dc += er32(DC); hw->mac.collision_delta = er32(COLC); if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583)) adapter->stats.tncrs += er32(TNCRS); } adapter->stats.colc += hw->mac.collision_delta; } adapter->stats.xonrxc += er32(XONRXC); adapter->stats.xontxc += er32(XONTXC); adapter->stats.xoffrxc += er32(XOFFRXC); adapter->stats.xofftxc += er32(XOFFTXC); adapter->stats.gptc += er32(GPTC); adapter->stats.gotc += er32(GOTCL); er32(GOTCH); /* Clear gotc */ adapter->stats.rnbc += er32(RNBC); adapter->stats.ruc += er32(RUC); adapter->stats.mptc += er32(MPTC); adapter->stats.bptc += er32(BPTC); /* used for adaptive IFS */ hw->mac.tx_packet_delta = er32(TPT); adapter->stats.tpt += hw->mac.tx_packet_delta; adapter->stats.algnerrc += er32(ALGNERRC); adapter->stats.rxerrc += er32(RXERRC); adapter->stats.cexterr += er32(CEXTERR); adapter->stats.tsctc += er32(TSCTC); adapter->stats.tsctfc += er32(TSCTFC); /* Fill out the OS statistics structure */ netdev->stats.multicast = adapter->stats.mprc; netdev->stats.collisions = adapter->stats.colc; /* Rx Errors */ /* RLEC on some newer hardware can be incorrect so build * our own version based on RUC and ROC */ netdev->stats.rx_errors = adapter->stats.rxerrc + adapter->stats.crcerrs + adapter->stats.algnerrc + adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; netdev->stats.rx_length_errors = adapter->stats.ruc + adapter->stats.roc; netdev->stats.rx_crc_errors = adapter->stats.crcerrs; netdev->stats.rx_frame_errors = adapter->stats.algnerrc; netdev->stats.rx_missed_errors = adapter->stats.mpc; /* Tx Errors */ netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol; netdev->stats.tx_aborted_errors = adapter->stats.ecol; netdev->stats.tx_window_errors = adapter->stats.latecol; netdev->stats.tx_carrier_errors = adapter->stats.tncrs; /* Tx Dropped needs to be maintained elsewhere */ /* Management Stats */ adapter->stats.mgptc += er32(MGTPTC); adapter->stats.mgprc += er32(MGTPRC); adapter->stats.mgpdc += er32(MGTPDC); /* Correctable ECC Errors */ if (hw->mac.type >= e1000_pch_lpt) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; adapter->uncorr_errors += (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; } } /** * e1000_phy_read_status - Update the PHY register status snapshot * @adapter: board private structure **/ static void e1000_phy_read_status(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_phy_regs *phy = &adapter->phy_regs; if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) && (er32(STATUS) & E1000_STATUS_LU) && (adapter->hw.phy.media_type == e1000_media_type_copper)) { int ret_val; ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); ret_val |= e1e_rphy(hw, MII_ADVERTISE, 
&phy->advertise); ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa); ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion); ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000); ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000); ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); if (ret_val) e_warn("Error reading PHY register\n"); } else { /* Do not read PHY registers if link is not up * Set values to typical power-on defaults */ phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | BMSR_ERCAP); phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | ADVERTISE_ALL | ADVERTISE_CSMA); phy->lpa = 0; phy->expansion = EXPANSION_ENABLENPAGE; phy->ctrl1000 = ADVERTISE_1000FULL; phy->stat1000 = 0; phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); } } static void e1000_print_link_info(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl = er32(CTRL); /* Link status message must follow this format for user tools */ netdev_info(adapter->netdev, "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", adapter->link_speed, adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : (ctrl & E1000_CTRL_RFCE) ? "Rx" : (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"); } static bool e1000e_has_link(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; bool link_active = false; s32 ret_val = 0; /* get_link_status is set on LSC (link status) interrupt or * Rx sequence error interrupt. get_link_status will stay * true until the check_for_link establishes link * for copper adapters ONLY */ switch (hw->phy.media_type) { case e1000_media_type_copper: if (hw->mac.get_link_status) { ret_val = hw->mac.ops.check_for_link(hw); link_active = !hw->mac.get_link_status; } else { link_active = true; } break; case e1000_media_type_fiber: ret_val = hw->mac.ops.check_for_link(hw); link_active = !!(er32(STATUS) & E1000_STATUS_LU); break; case e1000_media_type_internal_serdes: ret_val = hw->mac.ops.check_for_link(hw); link_active = hw->mac.serdes_has_link; break; default: case e1000_media_type_unknown: break; } if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ e_info("Gigabit has been disabled, downgrading speed\n"); } return link_active; } static void e1000e_enable_receives(struct e1000_adapter *adapter) { /* make sure the receive unit is started */ if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && (adapter->flags & FLAG_RESTART_NOW)) { struct e1000_hw *hw = &adapter->hw; u32 rctl = er32(RCTL); ew32(RCTL, rctl | E1000_RCTL_EN); adapter->flags &= ~FLAG_RESTART_NOW; } } static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; /* With 82574 controllers, PHY needs to be checked periodically * for hung state and reset, if two calls return true */ if (e1000_check_phy_82574(hw)) adapter->phy_hang_count++; else adapter->phy_hang_count = 0; if (adapter->phy_hang_count > 1) { adapter->phy_hang_count = 0; e_dbg("PHY appears hung - resetting\n"); schedule_work(&adapter->reset_task); } } /** * e1000_watchdog - Timer Call-back * @t: pointer to timer_list containing private info adapter **/ static void e1000_watchdog(struct timer_list *t) { struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer); /* Do the rest outside of interrupt 
context */ schedule_work(&adapter->watchdog_task); /* TODO: make this use queue_delayed_work() */ } static void e1000_watchdog_task(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, watchdog_task); struct net_device *netdev = adapter->netdev; struct e1000_mac_info *mac = &adapter->hw.mac; struct e1000_phy_info *phy = &adapter->hw.phy; struct e1000_ring *tx_ring = adapter->tx_ring; u32 dmoff_exit_timeout = 100, tries = 0; struct e1000_hw *hw = &adapter->hw; u32 link, tctl, pcim_state; if (test_bit(__E1000_DOWN, &adapter->state)) return; link = e1000e_has_link(adapter); if ((netif_carrier_ok(netdev)) && link) { /* Cancel scheduled suspend requests. */ pm_runtime_resume(netdev->dev.parent); e1000e_enable_receives(adapter); goto link_up; } if ((e1000e_enable_tx_pkt_filtering(hw)) && (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) e1000_update_mng_vlan(adapter); if (link) { if (!netif_carrier_ok(netdev)) { bool txb2b = true; /* Cancel scheduled suspend requests. */ pm_runtime_resume(netdev->dev.parent); /* Checking if MAC is in DMoff state*/ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { pcim_state = er32(STATUS); while (pcim_state & E1000_STATUS_PCIM_STATE) { if (tries++ == dmoff_exit_timeout) { e_dbg("Error in exiting dmoff\n"); break; } usleep_range(10000, 20000); pcim_state = er32(STATUS); /* Checking if MAC exited DMoff state */ if (!(pcim_state & E1000_STATUS_PCIM_STATE)) e1000_phy_hw_reset(&adapter->hw); } } /* update snapshot of PHY registers on LSC */ e1000_phy_read_status(adapter); mac->ops.get_link_up_info(&adapter->hw, &adapter->link_speed, &adapter->link_duplex); e1000_print_link_info(adapter); /* check if SmartSpeed worked */ e1000e_check_downshift(hw); if (phy->speed_downgraded) netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); /* On supported PHYs, check for duplex mismatch only * if link has autonegotiated at 10/100 half */ if ((hw->phy.type == e1000_phy_igp_3 || hw->phy.type == e1000_phy_bm) && hw->mac.autoneg && (adapter->link_speed == SPEED_10 || adapter->link_speed == SPEED_100) && (adapter->link_duplex == HALF_DUPLEX)) { u16 autoneg_exp; e1e_rphy(hw, MII_EXPANSION, &autoneg_exp); if (!(autoneg_exp & EXPANSION_NWAY)) e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n"); } /* adjust timeout factor according to speed/duplex */ adapter->tx_timeout_factor = 1; switch (adapter->link_speed) { case SPEED_10: txb2b = false; adapter->tx_timeout_factor = 16; break; case SPEED_100: txb2b = false; adapter->tx_timeout_factor = 10; break; } /* workaround: re-program speed mode bit after * link-up event */ if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && !txb2b) { u32 tarc0; tarc0 = er32(TARC(0)); tarc0 &= ~SPEED_MODE_BIT; ew32(TARC(0), tarc0); } /* enable transmits in the hardware, need to do this * after setting TARC(0) */ tctl = er32(TCTL); tctl |= E1000_TCTL_EN; ew32(TCTL, tctl); /* Perform any post-link-up configuration before * reporting link up. 
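 * (some PHYs can only apply speed/duplex dependent tuning once the link
 * partner has been resolved, which is what the optional cfg_on_link_up
 * hook below is for)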
*/ if (phy->ops.cfg_on_link_up) phy->ops.cfg_on_link_up(hw); netif_wake_queue(netdev); netif_carrier_on(netdev); if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); } } else { if (netif_carrier_ok(netdev)) { adapter->link_speed = 0; adapter->link_duplex = 0; /* Link status message must follow this format */ netdev_info(netdev, "NIC Link is Down\n"); netif_carrier_off(netdev); netif_stop_queue(netdev); if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); /* 8000ES2LAN requires a Rx packet buffer work-around * on link down event; reset the controller to flush * the Rx packet buffer. */ if (adapter->flags & FLAG_RX_NEEDS_RESTART) adapter->flags |= FLAG_RESTART_NOW; else pm_schedule_suspend(netdev->dev.parent, LINK_TIMEOUT); } } link_up: spin_lock(&adapter->stats64_lock); e1000e_update_stats(adapter); mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; adapter->tpt_old = adapter->stats.tpt; mac->collision_delta = adapter->stats.colc - adapter->colc_old; adapter->colc_old = adapter->stats.colc; adapter->gorc = adapter->stats.gorc - adapter->gorc_old; adapter->gorc_old = adapter->stats.gorc; adapter->gotc = adapter->stats.gotc - adapter->gotc_old; adapter->gotc_old = adapter->stats.gotc; spin_unlock(&adapter->stats64_lock); /* If the link is lost the controller stops DMA, but * if there is queued Tx work it cannot be done. So * reset the controller to flush the Tx packet buffers. */ if (!netif_carrier_ok(netdev) && (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) adapter->flags |= FLAG_RESTART_NOW; /* If reset is necessary, do it outside of interrupt context. */ if (adapter->flags & FLAG_RESTART_NOW) { schedule_work(&adapter->reset_task); /* return immediately since reset is imminent */ return; } e1000e_update_adaptive(&adapter->hw); /* Simple mode for Interrupt Throttle Rate (ITR) */ if (adapter->itr_setting == 4) { /* Symmetric Tx/Rx gets a reduced ITR=2000; * Total asymmetrical Tx or Rx gets ITR=8000; * everyone else is between 2000-8000. */ u32 goc = (adapter->gotc + adapter->gorc) / 10000; u32 dif = (adapter->gotc > adapter->gorc ? adapter->gotc - adapter->gorc : adapter->gorc - adapter->gotc) / 10000; u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; e1000e_write_itr(adapter, itr); } /* Cause software interrupt to ensure Rx ring is cleaned */ if (adapter->msix_entries) ew32(ICS, adapter->rx_ring->ims_val); else ew32(ICS, E1000_ICS_RXDMT0); /* flush pending descriptors to memory before detecting Tx hang */ e1000e_flush_descriptors(adapter); /* Force detection of hung controller every watchdog period */ adapter->detect_tx_hung = true; /* With 82571 controllers, LAA may be overwritten due to controller * reset from the other port. 
Set the appropriate LAA in RAR[0] */ if (e1000e_get_laa_state_82571(hw)) hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0); if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) e1000e_check_82574_phy_workaround(adapter); /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */ if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) && (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) { er32(RXSTMPH); adapter->rx_hwtstamp_cleared++; } else { adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP; } } /* Reset the timer */ if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); } #define E1000_TX_FLAGS_CSUM 0x00000001 #define E1000_TX_FLAGS_VLAN 0x00000002 #define E1000_TX_FLAGS_TSO 0x00000004 #define E1000_TX_FLAGS_IPV4 0x00000008 #define E1000_TX_FLAGS_NO_FCS 0x00000010 #define E1000_TX_FLAGS_HWTSTAMP 0x00000020 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 #define E1000_TX_FLAGS_VLAN_SHIFT 16 static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb, __be16 protocol) { struct e1000_context_desc *context_desc; struct e1000_buffer *buffer_info; unsigned int i; u32 cmd_length = 0; u16 ipcse = 0, mss; u8 ipcss, ipcso, tucss, tucso, hdr_len; int err; if (!skb_is_gso(skb)) return 0; err = skb_cow_head(skb, 0); if (err < 0) return err; hdr_len = skb_tcp_all_headers(skb); mss = skb_shinfo(skb)->gso_size; if (protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); cmd_length = E1000_TXD_CMD_IP; ipcse = skb_transport_offset(skb) - 1; } else if (skb_is_gso_v6(skb)) { tcp_v6_gso_csum_prep(skb); ipcse = 0; } ipcss = skb_network_offset(skb); ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; tucss = skb_transport_offset(skb); tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); i = tx_ring->next_to_use; context_desc = E1000_CONTEXT_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; context_desc->lower_setup.ip_fields.ipcss = ipcss; context_desc->lower_setup.ip_fields.ipcso = ipcso; context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); context_desc->upper_setup.tcp_fields.tucss = tucss; context_desc->upper_setup.tcp_fields.tucso = tucso; context_desc->upper_setup.tcp_fields.tucse = 0; context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; context_desc->cmd_and_length = cpu_to_le32(cmd_length); buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; i++; if (i == tx_ring->count) i = 0; tx_ring->next_to_use = i; return 1; } static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb, __be16 protocol) { struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_context_desc *context_desc; struct e1000_buffer *buffer_info; unsigned int i; u8 css; u32 cmd_len = E1000_TXD_CMD_DEXT; if (skb->ip_summed != CHECKSUM_PARTIAL) return false; switch (protocol) { case cpu_to_be16(ETH_P_IP): if (ip_hdr(skb)->protocol == IPPROTO_TCP) cmd_len |= E1000_TXD_CMD_TCP; break; case cpu_to_be16(ETH_P_IPV6): /* XXX not handling all IPV6 headers */ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) cmd_len |= E1000_TXD_CMD_TCP; break; default: if (unlikely(net_ratelimit())) e_warn("checksum_partial proto=%x!\n", be16_to_cpu(protocol)); break; } css = skb_checksum_start_offset(skb); i = 
tx_ring->next_to_use; buffer_info = &tx_ring->buffer_info[i]; context_desc = E1000_CONTEXT_DESC(*tx_ring, i); context_desc->lower_setup.ip_config = 0; context_desc->upper_setup.tcp_fields.tucss = css; context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; context_desc->upper_setup.tcp_fields.tucse = 0; context_desc->tcp_seg_setup.data = 0; context_desc->cmd_and_length = cpu_to_le32(cmd_len); buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; i++; if (i == tx_ring->count) i = 0; tx_ring->next_to_use = i; return true; } static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, unsigned int first, unsigned int max_per_txd, unsigned int nr_frags) { struct e1000_adapter *adapter = tx_ring->adapter; struct pci_dev *pdev = adapter->pdev; struct e1000_buffer *buffer_info; unsigned int len = skb_headlen(skb); unsigned int offset = 0, size, count = 0, i; unsigned int f, bytecount, segs; i = tx_ring->next_to_use; while (len) { buffer_info = &tx_ring->buffer_info[i]; size = min(len, max_per_txd); buffer_info->length = size; buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; buffer_info->dma = dma_map_single(&pdev->dev, skb->data + offset, size, DMA_TO_DEVICE); buffer_info->mapped_as_page = false; if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; len -= size; offset += size; count++; if (len) { i++; if (i == tx_ring->count) i = 0; } } for (f = 0; f < nr_frags; f++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; len = skb_frag_size(frag); offset = 0; while (len) { i++; if (i == tx_ring->count) i = 0; buffer_info = &tx_ring->buffer_info[i]; size = min(len, max_per_txd); buffer_info->length = size; buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, offset, size, DMA_TO_DEVICE); buffer_info->mapped_as_page = true; if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; len -= size; offset += size; count++; } } segs = skb_shinfo(skb)->gso_segs ? 
: 1; /* multiply data chunks by size of headers */ bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; tx_ring->buffer_info[i].skb = skb; tx_ring->buffer_info[i].segs = segs; tx_ring->buffer_info[i].bytecount = bytecount; tx_ring->buffer_info[first].next_to_watch = i; return count; dma_error: dev_err(&pdev->dev, "Tx DMA map failed\n"); buffer_info->dma = 0; if (count) count--; while (count--) { if (i == 0) i += tx_ring->count; i--; buffer_info = &tx_ring->buffer_info[i]; e1000_put_txbuf(tx_ring, buffer_info, true); } return 0; } static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) { struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_tx_desc *tx_desc = NULL; struct e1000_buffer *buffer_info; u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; unsigned int i; if (tx_flags & E1000_TX_FLAGS_TSO) { txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | E1000_TXD_CMD_TSE; txd_upper |= E1000_TXD_POPTS_TXSM << 8; if (tx_flags & E1000_TX_FLAGS_IPV4) txd_upper |= E1000_TXD_POPTS_IXSM << 8; } if (tx_flags & E1000_TX_FLAGS_CSUM) { txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; txd_upper |= E1000_TXD_POPTS_TXSM << 8; } if (tx_flags & E1000_TX_FLAGS_VLAN) { txd_lower |= E1000_TXD_CMD_VLE; txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); } if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) txd_lower &= ~(E1000_TXD_CMD_IFCS); if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) { txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; txd_upper |= E1000_TXD_EXTCMD_TSTAMP; } i = tx_ring->next_to_use; do { buffer_info = &tx_ring->buffer_info[i]; tx_desc = E1000_TX_DESC(*tx_ring, i); tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); tx_desc->lower.data = cpu_to_le32(txd_lower | buffer_info->length); tx_desc->upper.data = cpu_to_le32(txd_upper); i++; if (i == tx_ring->count) i = 0; } while (--count > 0); tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). */ wmb(); tx_ring->next_to_use = i; } #define MINIMUM_DHCP_PACKET_SIZE 282 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) { struct e1000_hw *hw = &adapter->hw; u16 length, offset; if (skb_vlan_tag_present(skb) && !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) return 0; if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) return 0; if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP)) return 0; { const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14); struct udphdr *udp; if (ip->protocol != IPPROTO_UDP) return 0; udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); if (ntohs(udp->dest) != 67) return 0; offset = (u8 *)udp + 8 - skb->data; length = skb->len - offset; return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); } return 0; } static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) { struct e1000_adapter *adapter = tx_ring->adapter; netif_stop_queue(adapter->netdev); /* Herbert's original patch had: * smp_mb__after_netif_stop_queue(); * but since that doesn't exist yet, just open code it. */ smp_mb(); /* We need to check again in a case another CPU has just * made room available. 
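 * (Illustrative sketch, not part of the original comment -- the race
 * this guards against interleaves roughly as follows:
 *
 *     xmit path (this CPU)             Tx clean path (other CPU)
 *     sees ring nearly full            frees descriptors
 *     netif_stop_queue()               checks the queue, sees it
 *     smp_mb()                         still running, so it issues
 *     re-check e1000_desc_unused()     no wake-up
 *
 * If the clean path finished just before the queue was stopped, its
 * wake-up check ran too early; the barrier plus the re-check below
 * restarts the queue here instead of leaving it stalled until the
 * next watchdog run.)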
*/ if (e1000_desc_unused(tx_ring) < size) return -EBUSY; /* A reprieve! */ netif_start_queue(adapter->netdev); ++adapter->restart_queue; return 0; } static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) { BUG_ON(size > tx_ring->count); if (e1000_desc_unused(tx_ring) >= size) return 0; return __e1000_maybe_stop_tx(tx_ring, size); } static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_ring *tx_ring = adapter->tx_ring; unsigned int first; unsigned int tx_flags = 0; unsigned int len = skb_headlen(skb); unsigned int nr_frags; unsigned int mss; int count = 0; int tso; unsigned int f; __be16 protocol = vlan_get_protocol(skb); if (test_bit(__E1000_DOWN, &adapter->state)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } if (skb->len <= 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } /* The minimum packet size with TCTL.PSP set is 17 bytes so * pad skb in order to meet this minimum size requirement */ if (skb_put_padto(skb, 17)) return NETDEV_TX_OK; mss = skb_shinfo(skb)->gso_size; if (mss) { u8 hdr_len; /* TSO Workaround for 82571/2/3 Controllers -- if skb->data * points to just header, pull a few bytes of payload from * frags into skb->data */ hdr_len = skb_tcp_all_headers(skb); /* we do this workaround for ES2LAN, but it is un-necessary, * avoiding it could save a lot of cycles */ if (skb->data_len && (hdr_len == len)) { unsigned int pull_size; pull_size = min_t(unsigned int, 4, skb->data_len); if (!__pskb_pull_tail(skb, pull_size)) { e_err("__pskb_pull_tail failed.\n"); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } len = skb_headlen(skb); } } /* reserve a descriptor for the offload context */ if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) count++; count++; count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); nr_frags = skb_shinfo(skb)->nr_frags; for (f = 0; f < nr_frags; f++) count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), adapter->tx_fifo_limit); if (adapter->hw.mac.tx_pkt_filtering) e1000_transfer_dhcp_info(adapter, skb); /* need: count + 2 desc gap to keep tail from touching * head, otherwise try next time */ if (e1000_maybe_stop_tx(tx_ring, count + 2)) return NETDEV_TX_BUSY; if (skb_vlan_tag_present(skb)) { tx_flags |= E1000_TX_FLAGS_VLAN; tx_flags |= (skb_vlan_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); } first = tx_ring->next_to_use; tso = e1000_tso(tx_ring, skb, protocol); if (tso < 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } if (tso) tx_flags |= E1000_TX_FLAGS_TSO; else if (e1000_tx_csum(tx_ring, skb, protocol)) tx_flags |= E1000_TX_FLAGS_CSUM; /* Old method was to assume IPv4 packet by default if TSO was enabled. * 82571 hardware supports TSO capabilities for IPv6 as well... * no longer assume, we must. 
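 * (Illustrative note, not part of the original comment: the
 * E1000_TX_FLAGS_IPV4 flag set below only takes effect together with
 * TSO -- in e1000_tx_queue() it adds the IXSM option bit so the
 * hardware also inserts the IPv4 header checksum; IPv6 has no header
 * checksum, so the flag stays clear for anything but ETH_P_IP.)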
*/ if (protocol == htons(ETH_P_IP)) tx_flags |= E1000_TX_FLAGS_IPV4; if (unlikely(skb->no_fcs)) tx_flags |= E1000_TX_FLAGS_NO_FCS; /* if count is 0 then mapping error has occurred */ count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, nr_frags); if (count) { if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && (adapter->flags & FLAG_HAS_HW_TIMESTAMP)) { if (!adapter->tx_hwtstamp_skb) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= E1000_TX_FLAGS_HWTSTAMP; adapter->tx_hwtstamp_skb = skb_get(skb); adapter->tx_hwtstamp_start = jiffies; schedule_work(&adapter->tx_hwtstamp_work); } else { adapter->tx_hwtstamp_skipped++; } } skb_tx_timestamp(skb); netdev_sent_queue(netdev, skb->len); e1000_tx_queue(tx_ring, tx_flags, count); /* Make sure there is space in the ring for the next send. */ e1000_maybe_stop_tx(tx_ring, ((MAX_SKB_FRAGS + 1) * DIV_ROUND_UP(PAGE_SIZE, adapter->tx_fifo_limit) + 4)); if (!netdev_xmit_more() || netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) e1000e_update_tdt_wa(tx_ring, tx_ring->next_to_use); else writel(tx_ring->next_to_use, tx_ring->tail); } } else { dev_kfree_skb_any(skb); tx_ring->buffer_info[first].time_stamp = 0; tx_ring->next_to_use = first; } return NETDEV_TX_OK; } /** * e1000_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * @txqueue: index of the hung queue (unused) **/ static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) { struct e1000_adapter *adapter = netdev_priv(netdev); /* Do the reset outside of interrupt context */ adapter->tx_timeout_count++; schedule_work(&adapter->reset_task); } static void e1000_reset_task(struct work_struct *work) { struct e1000_adapter *adapter; adapter = container_of(work, struct e1000_adapter, reset_task); rtnl_lock(); /* don't run the task if already down */ if (test_bit(__E1000_DOWN, &adapter->state)) { rtnl_unlock(); return; } if (!(adapter->flags & FLAG_RESTART_NOW)) { e1000e_dump(adapter); e_err("Reset adapter unexpectedly\n"); } e1000e_reinit_locked(adapter); rtnl_unlock(); } /** * e1000e_get_stats64 - Get System Network Statistics * @netdev: network interface device structure * @stats: rtnl_link_stats64 pointer * * Returns the address of the device statistics structure. 
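 * (Illustrative clarification, not part of the original kernel-doc:
 * despite the wording above, the function is void and simply fills the
 * caller-supplied @stats structure under the adapter's stats64_lock.)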
**/ void e1000e_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct e1000_adapter *adapter = netdev_priv(netdev); spin_lock(&adapter->stats64_lock); e1000e_update_stats(adapter); /* Fill out the OS statistics structure */ stats->rx_bytes = adapter->stats.gorc; stats->rx_packets = adapter->stats.gprc; stats->tx_bytes = adapter->stats.gotc; stats->tx_packets = adapter->stats.gptc; stats->multicast = adapter->stats.mprc; stats->collisions = adapter->stats.colc; /* Rx Errors */ /* RLEC on some newer hardware can be incorrect so build * our own version based on RUC and ROC */ stats->rx_errors = adapter->stats.rxerrc + adapter->stats.crcerrs + adapter->stats.algnerrc + adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc; stats->rx_crc_errors = adapter->stats.crcerrs; stats->rx_frame_errors = adapter->stats.algnerrc; stats->rx_missed_errors = adapter->stats.mpc; /* Tx Errors */ stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol; stats->tx_aborted_errors = adapter->stats.ecol; stats->tx_window_errors = adapter->stats.latecol; stats->tx_carrier_errors = adapter->stats.tncrs; /* Tx Dropped needs to be maintained elsewhere */ spin_unlock(&adapter->stats64_lock); } /** * e1000_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure **/ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) { struct e1000_adapter *adapter = netdev_priv(netdev); int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; /* Jumbo frame support */ if ((new_mtu > ETH_DATA_LEN) && !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { e_err("Jumbo Frames not supported.\n"); return -EINVAL; } /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */ if ((adapter->hw.mac.type >= e1000_pch2lan) && !(adapter->flags2 & FLAG2_CRC_STRIPPING) && (new_mtu > ETH_DATA_LEN)) { e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n"); return -EINVAL; } while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) usleep_range(1000, 1100); /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ adapter->max_frame_size = max_frame; netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; pm_runtime_get_sync(netdev->dev.parent); if (netif_running(netdev)) e1000e_down(adapter, true); /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN * means we reserve 2 more, this pushes us to allocate from the next * larger slab size. * i.e. 
RXBUFFER_2048 --> size-4096 slab * However with the new *_jumbo_rx* routines, jumbo receives will use * fragmented skbs */ if (max_frame <= 2048) adapter->rx_buffer_len = 2048; else adapter->rx_buffer_len = 4096; /* adjust allocation if LPE protects us, and we aren't using SBP */ if (max_frame <= (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; if (netif_running(netdev)) e1000e_up(adapter); else e1000e_reset(adapter); pm_runtime_put_sync(netdev->dev.parent); clear_bit(__E1000_RESETTING, &adapter->state); return 0; } static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct e1000_adapter *adapter = netdev_priv(netdev); struct mii_ioctl_data *data = if_mii(ifr); if (adapter->hw.phy.media_type != e1000_media_type_copper) return -EOPNOTSUPP; switch (cmd) { case SIOCGMIIPHY: data->phy_id = adapter->hw.phy.addr; break; case SIOCGMIIREG: e1000_phy_read_status(adapter); switch (data->reg_num & 0x1F) { case MII_BMCR: data->val_out = adapter->phy_regs.bmcr; break; case MII_BMSR: data->val_out = adapter->phy_regs.bmsr; break; case MII_PHYSID1: data->val_out = (adapter->hw.phy.id >> 16); break; case MII_PHYSID2: data->val_out = (adapter->hw.phy.id & 0xFFFF); break; case MII_ADVERTISE: data->val_out = adapter->phy_regs.advertise; break; case MII_LPA: data->val_out = adapter->phy_regs.lpa; break; case MII_EXPANSION: data->val_out = adapter->phy_regs.expansion; break; case MII_CTRL1000: data->val_out = adapter->phy_regs.ctrl1000; break; case MII_STAT1000: data->val_out = adapter->phy_regs.stat1000; break; case MII_ESTATUS: data->val_out = adapter->phy_regs.estatus; break; default: return -EIO; } break; case SIOCSMIIREG: default: return -EOPNOTSUPP; } return 0; } /** * e1000e_hwtstamp_set - control hardware time stamping * @netdev: network interface device structure * @ifr: interface request * * Outgoing time stamping can be enabled and disabled. Play nice and * disable it when requested, although it shouldn't cause any overhead * when no packet needs it. At most one packet in the queue may be * marked for time stamping, otherwise it would be impossible to tell * for sure to which packet the hardware time stamp belongs. * * Incoming time stamping has to be configured via the hardware filters. * Not all combinations are supported, in particular event type has to be * specified. Matching the kind of event packet is not supported, with the * exception of "all V2 events regardless of level 2 or 4". **/ static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) { struct e1000_adapter *adapter = netdev_priv(netdev); struct hwtstamp_config config; int ret_val; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; ret_val = e1000e_config_hwtstamp(adapter, &config); if (ret_val) return ret_val; switch (config.rx_filter) { case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: /* With V2 type filters which specify a Sync or Delay Request, * Path Delay Request/Response messages are also time stamped * by hardware so notify the caller the requested packets plus * some others are time stamped. */ config.rx_filter = HWTSTAMP_FILTER_SOME; break; default: break; } return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; } static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) { struct e1000_adapter *adapter = netdev_priv(netdev); return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config, sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0; } static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return e1000_mii_ioctl(netdev, ifr, cmd); case SIOCSHWTSTAMP: return e1000e_hwtstamp_set(netdev, ifr); case SIOCGHWTSTAMP: return e1000e_hwtstamp_get(netdev, ifr); default: return -EOPNOTSUPP; } } static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) { struct e1000_hw *hw = &adapter->hw; u32 i, mac_reg, wuc; u16 phy_reg, wuc_enable; int retval; /* copy MAC RARs to PHY RARs */ e1000_copy_rx_addrs_to_phy_ich8lan(hw); retval = hw->phy.ops.acquire(hw); if (retval) { e_err("Could not acquire PHY\n"); return retval; } /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); if (retval) goto release; /* copy MAC MTA to PHY MTA - only needed for pchlan */ for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); hw->phy.ops.write_reg_page(hw, BM_MTA(i), (u16)(mac_reg & 0xFFFF)); hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, (u16)((mac_reg >> 16) & 0xFFFF)); } /* configure PHY Rx Control register */ hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg); mac_reg = er32(RCTL); if (mac_reg & E1000_RCTL_UPE) phy_reg |= BM_RCTL_UPE; if (mac_reg & E1000_RCTL_MPE) phy_reg |= BM_RCTL_MPE; phy_reg &= ~(BM_RCTL_MO_MASK); if (mac_reg & E1000_RCTL_MO_3) phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) << BM_RCTL_MO_SHIFT); if (mac_reg & E1000_RCTL_BAM) phy_reg |= BM_RCTL_BAM; if (mac_reg & E1000_RCTL_PMCF) phy_reg |= BM_RCTL_PMCF; mac_reg = er32(CTRL); if (mac_reg & E1000_CTRL_RFCE) phy_reg |= BM_RCTL_RFCE; hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); wuc = E1000_WUC_PME_EN; if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC)) wuc |= E1000_WUC_APME; /* enable PHY wakeup in MAC register */ ew32(WUFC, wufc); ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME | E1000_WUC_PME_STATUS | wuc)); /* configure and enable PHY wakeup in PHY registers */ hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc); /* activate PHY wakeup */ wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); if (retval) e_err("Could not set PHY Host Wakeup bit\n"); release: hw->phy.ops.release(hw); return retval; } static void e1000e_flush_lpic(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 ret_val; pm_runtime_get_sync(netdev->dev.parent); ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto fl_out; pr_info("EEE TX LPI TIMER: %08X\n", er32(LPIC) >> E1000_LPIC_LPIET_SHIFT); hw->phy.ops.release(hw); fl_out: pm_runtime_put_sync(netdev->dev.parent); } /* S0ix implementation */ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 mac_data; u16 phy_data; if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && hw->mac.type >= e1000_pch_adp) { /* Request ME configure the device for S0ix */ mac_data = er32(H2ME); mac_data |= E1000_H2ME_START_DPG; mac_data &= ~E1000_H2ME_EXIT_DPG; 
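/* Illustrative note, not from the original source: on parts with valid
 * ME firmware (ADP and later), S0ix entry is delegated to the ME --
 * START_DPG is set and EXIT_DPG cleared in H2ME here, and the exit
 * flow later flips both bits and polls EXFWSM for DPG_EXIT_DONE.
 */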
trace_e1000e_trace_mac_register(mac_data); ew32(H2ME, mac_data); } else { /* Request driver configure the device to S0ix */ /* Disable the periodic inband message, * don't request PCIe clock in K1 page770_17[10:9] = 10b */ e1e_rphy(hw, HV_PM_CTRL, &phy_data); phy_data &= ~HV_PM_CTRL_K1_CLK_REQ; phy_data |= BIT(10); e1e_wphy(hw, HV_PM_CTRL, phy_data); /* Make sure we don't exit K1 every time a new packet arrives * 772_29[5] = 1 CS_Mode_Stay_In_K1 */ e1e_rphy(hw, I217_CGFREG, &phy_data); phy_data |= BIT(5); e1e_wphy(hw, I217_CGFREG, phy_data); /* Change the MAC/PHY interface to SMBus * Force the SMBus in PHY page769_23[0] = 1 * Force the SMBus in MAC CTRL_EXT[11] = 1 */ e1e_rphy(hw, CV_SMB_CTRL, &phy_data); phy_data |= CV_SMB_CTRL_FORCE_SMBUS; e1e_wphy(hw, CV_SMB_CTRL, phy_data); mac_data = er32(CTRL_EXT); mac_data |= E1000_CTRL_EXT_FORCE_SMBUS; ew32(CTRL_EXT, mac_data); /* DFT control: PHY bit: page769_20[0] = 1 * page769_20[7] - PHY PLL stop * page769_20[8] - PHY go to the electrical idle * page769_20[9] - PHY serdes disable * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1 */ e1e_rphy(hw, I82579_DFT_CTRL, &phy_data); phy_data |= BIT(0); phy_data |= BIT(7); phy_data |= BIT(8); phy_data |= BIT(9); e1e_wphy(hw, I82579_DFT_CTRL, phy_data); mac_data = er32(EXTCNF_CTRL); mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; ew32(EXTCNF_CTRL, mac_data); /* Enable the Dynamic Power Gating in the MAC */ mac_data = er32(FEXTNVM7); mac_data |= BIT(22); ew32(FEXTNVM7, mac_data); /* Disable disconnected cable conditioning for Power Gating */ mac_data = er32(DPGFR); mac_data |= BIT(2); ew32(DPGFR, mac_data); /* Don't wake from dynamic Power Gating with clock request */ mac_data = er32(FEXTNVM12); mac_data |= BIT(12); ew32(FEXTNVM12, mac_data); /* Ungate PGCB clock */ mac_data = er32(FEXTNVM9); mac_data &= ~BIT(28); ew32(FEXTNVM9, mac_data); /* Enable K1 off to enable mPHY Power Gating */ mac_data = er32(FEXTNVM6); mac_data |= BIT(31); ew32(FEXTNVM6, mac_data); /* Enable mPHY power gating for any link and speed */ mac_data = er32(FEXTNVM8); mac_data |= BIT(9); ew32(FEXTNVM8, mac_data); /* Enable the Dynamic Clock Gating in the DMA and MAC */ mac_data = er32(CTRL_EXT); mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN; ew32(CTRL_EXT, mac_data); /* No MAC DPG gating SLP_S0 in modern standby * Switch the logic of the lanphypc to use PMC counter */ mac_data = er32(FEXTNVM5); mac_data |= BIT(7); ew32(FEXTNVM5, mac_data); } /* Disable the time synchronization clock */ mac_data = er32(FEXTNVM7); mac_data |= BIT(31); mac_data &= ~BIT(0); ew32(FEXTNVM7, mac_data); /* Dynamic Power Gating Enable */ mac_data = er32(CTRL_EXT); mac_data |= BIT(3); ew32(CTRL_EXT, mac_data); /* Check MAC Tx/Rx packet buffer pointers. * Reset MAC Tx/Rx packet buffer pointers to suppress any * pending traffic indication that would prevent power gating. 
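 * (Illustrative note, not part of the original comment: the block
 * below walks the Tx FIFO registers TDFH/TDFT/TDFHS/TDFTS/TDFPC and
 * their RDF* Rx counterparts, clearing each one only when it reads
 * back non-zero so idle registers are not rewritten.)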
*/ mac_data = er32(TDFH); if (mac_data) ew32(TDFH, 0); mac_data = er32(TDFT); if (mac_data) ew32(TDFT, 0); mac_data = er32(TDFHS); if (mac_data) ew32(TDFHS, 0); mac_data = er32(TDFTS); if (mac_data) ew32(TDFTS, 0); mac_data = er32(TDFPC); if (mac_data) ew32(TDFPC, 0); mac_data = er32(RDFH); if (mac_data) ew32(RDFH, 0); mac_data = er32(RDFT); if (mac_data) ew32(RDFT, 0); mac_data = er32(RDFHS); if (mac_data) ew32(RDFHS, 0); mac_data = er32(RDFTS); if (mac_data) ew32(RDFTS, 0); mac_data = er32(RDFPC); if (mac_data) ew32(RDFPC, 0); } static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; bool firmware_bug = false; u32 mac_data; u16 phy_data; u32 i = 0; if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && hw->mac.type >= e1000_pch_adp) { /* Keep the GPT clock enabled for CSME */ mac_data = er32(FEXTNVM); mac_data |= BIT(3); ew32(FEXTNVM, mac_data); /* Request ME unconfigure the device from S0ix */ mac_data = er32(H2ME); mac_data &= ~E1000_H2ME_START_DPG; mac_data |= E1000_H2ME_EXIT_DPG; trace_e1000e_trace_mac_register(mac_data); ew32(H2ME, mac_data); /* Poll up to 2.5 seconds for ME to unconfigure DPG. * If this takes more than 1 second, show a warning indicating a * firmware bug */ while (!(er32(EXFWSM) & E1000_EXFWSM_DPG_EXIT_DONE)) { if (i > 100 && !firmware_bug) firmware_bug = true; if (i++ == 250) { e_dbg("Timeout (firmware bug): %d msec\n", i * 10); break; } usleep_range(10000, 11000); } if (firmware_bug) e_warn("DPG_EXIT_DONE took %d msec. This is a firmware bug\n", i * 10); else e_dbg("DPG_EXIT_DONE cleared after %d msec\n", i * 10); } else { /* Request driver unconfigure the device from S0ix */ /* Disable the Dynamic Power Gating in the MAC */ mac_data = er32(FEXTNVM7); mac_data &= 0xFFBFFFFF; ew32(FEXTNVM7, mac_data); /* Disable mPHY power gating for any link and speed */ mac_data = er32(FEXTNVM8); mac_data &= ~BIT(9); ew32(FEXTNVM8, mac_data); /* Disable K1 off */ mac_data = er32(FEXTNVM6); mac_data &= ~BIT(31); ew32(FEXTNVM6, mac_data); /* Disable Ungate PGCB clock */ mac_data = er32(FEXTNVM9); mac_data |= BIT(28); ew32(FEXTNVM9, mac_data); /* Cancel not waking from dynamic * Power Gating with clock request */ mac_data = er32(FEXTNVM12); mac_data &= ~BIT(12); ew32(FEXTNVM12, mac_data); /* Cancel disable disconnected cable conditioning * for Power Gating */ mac_data = er32(DPGFR); mac_data &= ~BIT(2); ew32(DPGFR, mac_data); /* Disable the Dynamic Clock Gating in the DMA and MAC */ mac_data = er32(CTRL_EXT); mac_data &= 0xFFF7FFFF; ew32(CTRL_EXT, mac_data); /* Revert the lanphypc logic to use the internal Gbe counter * and not the PMC counter */ mac_data = er32(FEXTNVM5); mac_data &= 0xFFFFFF7F; ew32(FEXTNVM5, mac_data); /* Enable the periodic inband message, * Request PCIe clock in K1 page770_17[10:9] =01b */ e1e_rphy(hw, HV_PM_CTRL, &phy_data); phy_data &= 0xFBFF; phy_data |= HV_PM_CTRL_K1_CLK_REQ; e1e_wphy(hw, HV_PM_CTRL, phy_data); /* Return back configuration * 772_29[5] = 0 CS_Mode_Stay_In_K1 */ e1e_rphy(hw, I217_CGFREG, &phy_data); phy_data &= 0xFFDF; e1e_wphy(hw, I217_CGFREG, phy_data); /* Change the MAC/PHY interface to Kumeran * Unforce the SMBus in PHY page769_23[0] = 0 * Unforce the SMBus in MAC CTRL_EXT[11] = 0 */ e1e_rphy(hw, CV_SMB_CTRL, &phy_data); phy_data &= ~CV_SMB_CTRL_FORCE_SMBUS; e1e_wphy(hw, CV_SMB_CTRL, phy_data); mac_data = er32(CTRL_EXT); mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS; ew32(CTRL_EXT, mac_data); } /* Disable Dynamic Power Gating */ mac_data = er32(CTRL_EXT); mac_data &= 0xFFFFFFF7; ew32(CTRL_EXT, mac_data); /* 
Enable the time synchronization clock */ mac_data = er32(FEXTNVM7); mac_data &= ~BIT(31); mac_data |= BIT(0); ew32(FEXTNVM7, mac_data); } static int e1000e_pm_freeze(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct e1000_adapter *adapter = netdev_priv(netdev); bool present; rtnl_lock(); present = netif_device_present(netdev); netif_device_detach(netdev); if (present && netif_running(netdev)) { int count = E1000_CHECK_RESET_COUNT; while (test_bit(__E1000_RESETTING, &adapter->state) && count--) usleep_range(10000, 11000); WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); /* Quiesce the device without resetting the hardware */ e1000e_down(adapter, false); e1000_free_irq(adapter); } rtnl_unlock(); e1000e_reset_interrupt_capability(adapter); /* Allow time for pending master requests to run */ e1000e_disable_pcie_master(&adapter->hw); return 0; } static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 ctrl, ctrl_ext, rctl, status, wufc; int retval = 0; /* Runtime suspend should only enable wakeup for link changes */ if (runtime) wufc = E1000_WUFC_LNKC; else if (device_may_wakeup(&pdev->dev)) wufc = adapter->wol; else wufc = 0; status = er32(STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; if (wufc) { e1000_setup_rctl(adapter); e1000e_set_rx_mode(netdev); /* turn on all-multi mode if wake on multicast is enabled */ if (wufc & E1000_WUFC_MC) { rctl = er32(RCTL); rctl |= E1000_RCTL_MPE; ew32(RCTL, rctl); } ctrl = er32(CTRL); ctrl |= E1000_CTRL_ADVD3WUC; if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; ew32(CTRL, ctrl); if (adapter->hw.phy.media_type == e1000_media_type_fiber || adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { /* keep the laser running in D3 */ ctrl_ext = er32(CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; ew32(CTRL_EXT, ctrl_ext); } if (!runtime) e1000e_power_up_phy(adapter); if (adapter->flags & FLAG_IS_ICH) e1000_suspend_workarounds_ich8lan(&adapter->hw); if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { /* enable wakeup by the PHY */ retval = e1000_init_phy_wakeup(adapter, wufc); if (retval) return retval; } else { /* enable wakeup by the MAC */ ew32(WUFC, wufc); ew32(WUC, E1000_WUC_PME_EN); } } else { ew32(WUC, 0); ew32(WUFC, 0); e1000_power_down_phy(adapter); } if (adapter->hw.phy.type == e1000_phy_igp_3) { e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); } else if (hw->mac.type >= e1000_pch_lpt) { if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) /* ULP does not support wake from unicast, multicast * or broadcast. */ retval = e1000_enable_ulp_lpt_lp(hw, !runtime); if (retval) return retval; } /* Ensure that the appropriate bits are set in LPI_CTRL * for EEE in Sx */ if ((hw->phy.type >= e1000_phy_i217) && adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) { u16 lpi_ctrl = 0; retval = hw->phy.ops.acquire(hw); if (!retval) { retval = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl); if (!retval) { if (adapter->eee_advert & hw->dev_spec.ich8lan.eee_lp_ability & I82579_EEE_100_SUPPORTED) lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; if (adapter->eee_advert & hw->dev_spec.ich8lan.eee_lp_ability & I82579_EEE_1000_SUPPORTED) lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; retval = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl); } } hw->phy.ops.release(hw); } /* Release control of h/w to f/w. 
If f/w is AMT enabled, this * would have already happened in close and is redundant. */ e1000e_release_hw_control(adapter); pci_clear_master(pdev); /* The pci-e switch on some quad port adapters will report a * correctable error when the MAC transitions from D0 to D3. To * prevent this we need to mask off the correctable errors on the * downstream port of the pci-e switch. * * We don't have the associated upstream bridge while assigning * the PCI device into guest. For example, the KVM on power is * one of the cases. */ if (adapter->flags & FLAG_IS_QUAD_PORT) { struct pci_dev *us_dev = pdev->bus->self; u16 devctl; if (!us_dev) return 0; pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl); pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, (devctl & ~PCI_EXP_DEVCTL_CERE)); pci_save_state(pdev); pci_prepare_to_sleep(pdev); pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); } return 0; } /** * __e1000e_disable_aspm - Disable ASPM states * @pdev: pointer to PCI device struct * @state: bit-mask of ASPM states to disable * @locked: indication if this context holds pci_bus_sem locked. * * Some devices *must* have certain ASPM states disabled per hardware errata. **/ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked) { struct pci_dev *parent = pdev->bus->self; u16 aspm_dis_mask = 0; u16 pdev_aspmc, parent_aspmc; switch (state) { case PCIE_LINK_STATE_L0S: case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1: aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S; fallthrough; /* can't have L1 without L0s */ case PCIE_LINK_STATE_L1: aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1; break; default: return; } pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc); pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC; if (parent) { pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_aspmc); parent_aspmc &= PCI_EXP_LNKCTL_ASPMC; } /* Nothing to do if the ASPM states to be disabled already are */ if (!(pdev_aspmc & aspm_dis_mask) && (!parent || !(parent_aspmc & aspm_dis_mask))) return; dev_info(&pdev->dev, "Disabling ASPM %s %s\n", (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "", (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : ""); #ifdef CONFIG_PCIEASPM if (locked) pci_disable_link_state_locked(pdev, state); else pci_disable_link_state(pdev, state); /* Double-check ASPM control. If not disabled by the above, the * BIOS is preventing that from happening (or CONFIG_PCIEASPM is * not enabled); override by writing PCI config space directly. */ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc); pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC; if (!(aspm_dis_mask & pdev_aspmc)) return; #endif /* Both device and parent should have the same ASPM setting. * Disable ASPM in downstream component first and then upstream. */ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask); if (parent) pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, aspm_dis_mask); } /** * e1000e_disable_aspm - Disable ASPM states. * @pdev: pointer to PCI device struct * @state: bit-mask of ASPM states to disable * * This function acquires the pci_bus_sem! * Some devices *must* have certain ASPM states disabled per hardware errata. **/ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) { __e1000e_disable_aspm(pdev, state, 0); } /** * e1000e_disable_aspm_locked - Disable ASPM states. * @pdev: pointer to PCI device struct * @state: bit-mask of ASPM states to disable * * This function must be called with pci_bus_sem acquired! 
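 * (Illustrative note, not part of the original kernel-doc: this locked
 * variant exists for callers that already hold pci_bus_sem, such as the
 * e1000_io_slot_reset() error-recovery path below, which is invoked
 * while the PCI core walks the bus.)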
* Some devices *must* have certain ASPM states disabled per hardware errata. **/ static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state) { __e1000e_disable_aspm(pdev, state, 1); } static int e1000e_pm_thaw(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct e1000_adapter *adapter = netdev_priv(netdev); int rc = 0; e1000e_set_interrupt_capability(adapter); rtnl_lock(); if (netif_running(netdev)) { rc = e1000_request_irq(adapter); if (rc) goto err_irq; e1000e_up(adapter); } netif_device_attach(netdev); err_irq: rtnl_unlock(); return rc; } static int __e1000_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u16 aspm_disable_flag = 0; if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) aspm_disable_flag = PCIE_LINK_STATE_L0S; if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) aspm_disable_flag |= PCIE_LINK_STATE_L1; if (aspm_disable_flag) e1000e_disable_aspm(pdev, aspm_disable_flag); pci_set_master(pdev); if (hw->mac.type >= e1000_pch2lan) e1000_resume_workarounds_pchlan(&adapter->hw); e1000e_power_up_phy(adapter); /* report the system wakeup cause from S3/S4 */ if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { u16 phy_data; e1e_rphy(&adapter->hw, BM_WUS, &phy_data); if (phy_data) { e_info("PHY Wakeup cause - %s\n", phy_data & E1000_WUS_EX ? "Unicast Packet" : phy_data & E1000_WUS_MC ? "Multicast Packet" : phy_data & E1000_WUS_BC ? "Broadcast Packet" : phy_data & E1000_WUS_MAG ? "Magic Packet" : phy_data & E1000_WUS_LNKC ? "Link Status Change" : "other"); } e1e_wphy(&adapter->hw, BM_WUS, ~0); } else { u32 wus = er32(WUS); if (wus) { e_info("MAC Wakeup cause - %s\n", wus & E1000_WUS_EX ? "Unicast Packet" : wus & E1000_WUS_MC ? "Multicast Packet" : wus & E1000_WUS_BC ? "Broadcast Packet" : wus & E1000_WUS_MAG ? "Magic Packet" : wus & E1000_WUS_LNKC ? "Link Status Change" : "other"); } ew32(WUS, ~0); } e1000e_reset(adapter); e1000_init_manageability_pt(adapter); /* If the controller has AMT, do not set DRV_LOAD until the interface * is up. For all other cases, let the f/w know that the h/w is now * under the control of the driver. 
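 * (Illustrative note, not part of the original comment: "taking
 * control" means setting the DRV_LOAD indication through
 * e1000e_get_hw_control(); on AMT-capable parts this is deferred until
 * the interface is brought up so the manageability firmware keeps
 * ownership while the port is down.)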
*/ if (!(adapter->flags & FLAG_HAS_AMT)) e1000e_get_hw_control(adapter); return 0; } static __maybe_unused int e1000e_pm_prepare(struct device *dev) { return pm_runtime_suspended(dev) && pm_suspend_via_firmware(); } static __maybe_unused int e1000e_pm_suspend(struct device *dev) { struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = to_pci_dev(dev); int rc; e1000e_flush_lpic(pdev); e1000e_pm_freeze(dev); rc = __e1000_shutdown(pdev, false); if (rc) { e1000e_pm_thaw(dev); } else { /* Introduce S0ix implementation */ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS) e1000e_s0ix_entry_flow(adapter); } return rc; } static __maybe_unused int e1000e_pm_resume(struct device *dev) { struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = to_pci_dev(dev); int rc; /* Introduce S0ix implementation */ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS) e1000e_s0ix_exit_flow(adapter); rc = __e1000_resume(pdev); if (rc) return rc; return e1000e_pm_thaw(dev); } static __maybe_unused int e1000e_pm_runtime_idle(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct e1000_adapter *adapter = netdev_priv(netdev); u16 eee_lp; eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability; if (!e1000e_has_link(adapter)) { adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp; pm_schedule_suspend(dev, 5 * MSEC_PER_SEC); } return -EBUSY; } static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); int rc; pdev->pme_poll = true; rc = __e1000_resume(pdev); if (rc) return rc; if (netdev->flags & IFF_UP) e1000e_up(adapter); return rc; } static __maybe_unused int e1000e_pm_runtime_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); if (netdev->flags & IFF_UP) { int count = E1000_CHECK_RESET_COUNT; while (test_bit(__E1000_RESETTING, &adapter->state) && count--) usleep_range(10000, 11000); WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); /* Down the device without resetting the hardware */ e1000e_down(adapter, false); } if (__e1000_shutdown(pdev, true)) { e1000e_pm_runtime_resume(dev); return -EBUSY; } return 0; } static void e1000_shutdown(struct pci_dev *pdev) { e1000e_flush_lpic(pdev); e1000e_pm_freeze(&pdev->dev); __e1000_shutdown(pdev, false); } #ifdef CONFIG_NET_POLL_CONTROLLER static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); if (adapter->msix_entries) { int vector, msix_irq; vector = 0; msix_irq = adapter->msix_entries[vector].vector; if (disable_hardirq(msix_irq)) e1000_intr_msix_rx(msix_irq, netdev); enable_irq(msix_irq); vector++; msix_irq = adapter->msix_entries[vector].vector; if (disable_hardirq(msix_irq)) e1000_intr_msix_tx(msix_irq, netdev); enable_irq(msix_irq); vector++; msix_irq = adapter->msix_entries[vector].vector; if (disable_hardirq(msix_irq)) e1000_msix_other(msix_irq, netdev); enable_irq(msix_irq); } return IRQ_HANDLED; } /** * e1000_netpoll * @netdev: network interface device structure * * Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. 
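 * (Illustrative note, not part of the original kernel-doc: depending on
 * adapter->int_mode the poll below either replays the legacy or MSI
 * handler with the hard IRQ temporarily disabled, or, for MSI-X, runs
 * the Rx, Tx and "other" vector handlers back to back as done in
 * e1000_intr_msix() above.)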
It's not called while * the interrupt routine is executing. */ static void e1000_netpoll(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); switch (adapter->int_mode) { case E1000E_INT_MODE_MSIX: e1000_intr_msix(adapter->pdev->irq, netdev); break; case E1000E_INT_MODE_MSI: if (disable_hardirq(adapter->pdev->irq)) e1000_intr_msi(adapter->pdev->irq, netdev); enable_irq(adapter->pdev->irq); break; default: /* E1000E_INT_MODE_LEGACY */ if (disable_hardirq(adapter->pdev->irq)) e1000_intr(adapter->pdev->irq, netdev); enable_irq(adapter->pdev->irq); break; } } #endif /** * e1000_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device * @state: The current pci connection state * * This function is called after a PCI bus error affecting * this device has been detected. */ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { e1000e_pm_freeze(&pdev->dev); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; pci_disable_device(pdev); /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } /** * e1000_io_slot_reset - called after the pci bus has been reset. * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. Implementation * resembles the first-half of the e1000e_pm_resume routine. */ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u16 aspm_disable_flag = 0; int err; pci_ers_result_t result; if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) aspm_disable_flag = PCIE_LINK_STATE_L0S; if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) aspm_disable_flag |= PCIE_LINK_STATE_L1; if (aspm_disable_flag) e1000e_disable_aspm_locked(pdev, aspm_disable_flag); err = pci_enable_device_mem(pdev); if (err) { dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { pdev->state_saved = true; pci_restore_state(pdev); pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); e1000e_reset(adapter); ew32(WUS, ~0); result = PCI_ERS_RESULT_RECOVERED; } return result; } /** * e1000_io_resume - called when traffic can start flowing again. * @pdev: Pointer to PCI device * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the * second-half of the e1000e_pm_resume routine. */ static void e1000_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); e1000_init_manageability_pt(adapter); e1000e_pm_thaw(&pdev->dev); /* If the controller has AMT, do not set DRV_LOAD until the interface * is up. For all other cases, let the f/w know that the h/w is now * under the control of the driver. */ if (!(adapter->flags & FLAG_HAS_AMT)) e1000e_get_hw_control(adapter); } static void e1000_print_device_info(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; u32 ret_val; u8 pba_str[E1000_PBANUM_LENGTH]; /* print bus type/speed/width info */ e_info("(PCI Express:2.5GT/s:%s) %pM\n", /* bus width */ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : "Width x1"), /* MAC address */ netdev->dev_addr); e_info("Intel(R) PRO/%s Network Connection\n", (hw->phy.type == e1000_phy_ife) ? 
"10/100" : "1000"); ret_val = e1000_read_pba_string_generic(hw, pba_str, E1000_PBANUM_LENGTH); if (ret_val) strscpy((char *)pba_str, "Unknown", sizeof(pba_str)); e_info("MAC: %d, PHY: %d, PBA No: %s\n", hw->mac.type, hw->phy.type, pba_str); } static void e1000_eeprom_checks(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; int ret_val; u16 buf = 0; if (hw->mac.type != e1000_82573) return; ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); le16_to_cpus(&buf); if (!ret_val && (!(buf & BIT(0)))) { /* Deep Smart Power Down (DSPD) */ dev_warn(&adapter->pdev->dev, "Warning: detected DSPD enabled in EEPROM\n"); } } static netdev_features_t e1000_fix_features(struct net_device *netdev, netdev_features_t features) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */ if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN)) features &= ~NETIF_F_RXFCS; /* Since there is no support for separate Rx/Tx vlan accel * enable/disable make sure Tx flag is always in same state as Rx. */ if (features & NETIF_F_HW_VLAN_CTAG_RX) features |= NETIF_F_HW_VLAN_CTAG_TX; else features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } static int e1000_set_features(struct net_device *netdev, netdev_features_t features) { struct e1000_adapter *adapter = netdev_priv(netdev); netdev_features_t changed = features ^ netdev->features; if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) adapter->flags |= FLAG_TSO_FORCE; if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_RXALL))) return 0; if (changed & NETIF_F_RXFCS) { if (features & NETIF_F_RXFCS) { adapter->flags2 &= ~FLAG2_CRC_STRIPPING; } else { /* We need to take it back to defaults, which might mean * stripping is still disabled at the adapter level. */ if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING) adapter->flags2 |= FLAG2_CRC_STRIPPING; else adapter->flags2 &= ~FLAG2_CRC_STRIPPING; } } netdev->features = features; if (netif_running(netdev)) e1000e_reinit_locked(adapter); else e1000e_reset(adapter); return 1; } static const struct net_device_ops e1000e_netdev_ops = { .ndo_open = e1000e_open, .ndo_stop = e1000e_close, .ndo_start_xmit = e1000_xmit_frame, .ndo_get_stats64 = e1000e_get_stats64, .ndo_set_rx_mode = e1000e_set_rx_mode, .ndo_set_mac_address = e1000_set_mac, .ndo_change_mtu = e1000_change_mtu, .ndo_eth_ioctl = e1000_ioctl, .ndo_tx_timeout = e1000_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = e1000_netpoll, #endif .ndo_set_features = e1000_set_features, .ndo_fix_features = e1000_fix_features, .ndo_features_check = passthru_features_check, }; /** * e1000_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in e1000_pci_tbl * * Returns 0 on success, negative on failure * * e1000_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. 
**/ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct e1000_adapter *adapter; struct e1000_hw *hw; const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; resource_size_t mmio_start, mmio_len; resource_size_t flash_start, flash_len; static int cards_found; u16 aspm_disable_flag = 0; u16 eeprom_data = 0; u16 eeprom_apme_mask = E1000_EEPROM_APME; int bars, i, err; s32 ret_val = 0; if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) aspm_disable_flag = PCIE_LINK_STATE_L0S; if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) aspm_disable_flag |= PCIE_LINK_STATE_L1; if (aspm_disable_flag) e1000e_disable_aspm(pdev, aspm_disable_flag); err = pci_enable_device_mem(pdev); if (err) return err; err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_dma; } bars = pci_select_bars(pdev, IORESOURCE_MEM); err = pci_request_selected_regions_exclusive(pdev, bars, e1000e_driver_name); if (err) goto err_pci_reg; pci_set_master(pdev); /* PCI config space info */ err = pci_save_state(pdev); if (err) goto err_alloc_etherdev; err = -ENOMEM; netdev = alloc_etherdev(sizeof(struct e1000_adapter)); if (!netdev) goto err_alloc_etherdev; SET_NETDEV_DEV(netdev, &pdev->dev); netdev->irq = pdev->irq; pci_set_drvdata(pdev, netdev); adapter = netdev_priv(netdev); hw = &adapter->hw; adapter->netdev = netdev; adapter->pdev = pdev; adapter->ei = ei; adapter->pba = ei->pba; adapter->flags = ei->flags; adapter->flags2 = ei->flags2; adapter->hw.adapter = adapter; adapter->hw.mac.type = ei->mac; adapter->max_hw_frame_size = ei->max_hw_frame_size; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); mmio_start = pci_resource_start(pdev, 0); mmio_len = pci_resource_len(pdev, 0); err = -EIO; adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); if (!adapter->hw.hw_addr) goto err_ioremap; if ((adapter->flags & FLAG_HAS_FLASH) && (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) && (hw->mac.type < e1000_pch_spt)) { flash_start = pci_resource_start(pdev, 1); flash_len = pci_resource_len(pdev, 1); adapter->hw.flash_address = ioremap(flash_start, flash_len); if (!adapter->hw.flash_address) goto err_flashmap; } /* Set default EEE advertisement */ if (adapter->flags2 & FLAG2_HAS_EEE) adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; /* construct the net_device struct */ netdev->netdev_ops = &e1000e_netdev_ops; e1000e_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; netif_napi_add(netdev, &adapter->napi, e1000e_poll); strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); netdev->mem_start = mmio_start; netdev->mem_end = mmio_start + mmio_len; adapter->bd_number = cards_found++; e1000e_check_options(adapter); /* setup adapter struct */ err = e1000_sw_init(adapter); if (err) goto err_sw_init; memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); err = ei->get_variants(adapter); if (err) goto err_hw_init; if ((adapter->flags & FLAG_IS_ICH) && (adapter->flags & FLAG_READ_ONLY_NVM) && (hw->mac.type < e1000_pch_spt)) e1000e_write_protect_nvm_ich8lan(&adapter->hw); hw->mac.ops.get_bus_info(&adapter->hw); adapter->hw.phy.autoneg_wait_to_complete = 0; /* Copper options */ if (adapter->hw.phy.media_type == e1000_media_type_copper) { adapter->hw.phy.mdix = AUTO_ALL_MODES; adapter->hw.phy.disable_polarity_correction = 0; adapter->hw.phy.ms_type = e1000_ms_hw_default; } if 
(hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) dev_info(&pdev->dev, "PHY reset is blocked due to SOL/IDER session.\n"); /* Set initial default active device features */ netdev->features = (NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_HW_CSUM); /* disable TSO for pcie and 10/100 speeds to avoid * some hardware issues and for i219 to fix transfer * speed being capped at 60% */ if (!(adapter->flags & FLAG_TSO_FORCE)) { switch (adapter->link_speed) { case SPEED_10: case SPEED_100: e_info("10/100 speed: disabling TSO\n"); netdev->features &= ~NETIF_F_TSO; netdev->features &= ~NETIF_F_TSO6; break; case SPEED_1000: netdev->features |= NETIF_F_TSO; netdev->features |= NETIF_F_TSO6; break; default: /* oops */ break; } if (hw->mac.type == e1000_pch_spt) { netdev->features &= ~NETIF_F_TSO; netdev->features &= ~NETIF_F_TSO6; } } /* Set user-changeable features (subset of all device features) */ netdev->hw_features = netdev->features; netdev->hw_features |= NETIF_F_RXFCS; netdev->priv_flags |= IFF_SUPP_NOFCS; netdev->hw_features |= NETIF_F_RXALL; if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->vlan_features |= (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_CSUM); netdev->priv_flags |= IFF_UNICAST_FLT; netdev->features |= NETIF_F_HIGHDMA; netdev->vlan_features |= NETIF_F_HIGHDMA; /* MTU range: 68 - max_hw_frame_size */ netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = adapter->max_hw_frame_size - (VLAN_ETH_HLEN + ETH_FCS_LEN); if (e1000e_enable_mng_pass_thru(&adapter->hw)) adapter->flags |= FLAG_MNG_PT_ENABLED; /* before reading the NVM, reset the controller to * put the device in a known good starting state */ adapter->hw.mac.ops.reset_hw(&adapter->hw); /* systems with ASPM and others may see the checksum fail on the first * attempt. Let's give it a few tries */ for (i = 0;; i++) { if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) break; if (i == 2) { dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); err = -EIO; goto err_eeprom; } } e1000_eeprom_checks(adapter); /* copy the MAC address */ if (e1000e_read_mac_addr(&adapter->hw)) dev_err(&pdev->dev, "NVM Read Error while reading MAC address\n"); eth_hw_addr_set(netdev, adapter->hw.mac.addr); if (!is_valid_ether_addr(netdev->dev_addr)) { dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", netdev->dev_addr); err = -EIO; goto err_eeprom; } timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0); timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0); INIT_WORK(&adapter->reset_task, e1000_reset_task); INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); /* Initialize link parameters. 
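 * (Illustrative note, not part of the original comment: the 0x2f
 * assigned to autoneg_advertised below advertises 10 and 100 Mb/s at
 * half and full duplex plus 1000 Mb/s full duplex, i.e. every standard
 * copper mode except 1000 half:
 *
 *   0x2f == ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF |
 *           ADVERTISE_100_FULL | ADVERTISE_1000_FULL
 *
 * assuming the ADVERTISE_* bit definitions from this driver's hw.h.)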
User can change them with ethtool */ adapter->hw.mac.autoneg = 1; adapter->fc_autoneg = true; adapter->hw.fc.requested_mode = e1000_fc_default; adapter->hw.fc.current_mode = e1000_fc_default; adapter->hw.phy.autoneg_advertised = 0x2f; /* Initial Wake on LAN setting - If APM wake is enabled in * the EEPROM, enable the ACPI Magic Packet filter */ if (adapter->flags & FLAG_APME_IN_WUC) { /* APME bit in EEPROM is mapped to WUC.APME */ eeprom_data = er32(WUC); eeprom_apme_mask = E1000_WUC_APME; if ((hw->mac.type > e1000_ich10lan) && (eeprom_data & E1000_WUC_PHY_WAKE)) adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; } else if (adapter->flags & FLAG_APME_IN_CTRL3) { if (adapter->flags & FLAG_APME_CHECK_PORT_B && (adapter->hw.bus.func == 1)) ret_val = e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); else ret_val = e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); } /* fetch WoL from EEPROM */ if (ret_val) e_dbg("NVM read error getting WoL initial values: %d\n", ret_val); else if (eeprom_data & eeprom_apme_mask) adapter->eeprom_wol |= E1000_WUFC_MAG; /* now that we have the eeprom settings, apply the special cases * where the eeprom may be wrong or the board simply won't support * wake on lan on a particular port */ if (!(adapter->flags & FLAG_HAS_WOL)) adapter->eeprom_wol = 0; /* initialize the wol settings based on the eeprom settings */ adapter->wol = adapter->eeprom_wol; /* make sure adapter isn't asleep if manageability is enabled */ if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) || (hw->mac.ops.check_mng_mode(hw))) device_wakeup_enable(&pdev->dev); /* save off EEPROM version number */ ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); if (ret_val) { e_dbg("NVM read error getting EEPROM version: %d\n", ret_val); adapter->eeprom_vers = 0; } /* init PTP hardware clock */ e1000e_ptp_init(adapter); /* reset the hardware with the new settings */ e1000e_reset(adapter); /* If the controller has AMT, do not set DRV_LOAD until the interface * is up. For all other cases, let the f/w know that the h/w is now * under the control of the driver. */ if (!(adapter->flags & FLAG_HAS_AMT)) e1000e_get_hw_control(adapter); if (hw->mac.type >= e1000_pch_cnp) adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS; strscpy(netdev->name, "eth%d", sizeof(netdev->name)); err = register_netdev(netdev); if (err) goto err_register; /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); e1000_print_device_info(adapter); dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE); if (pci_dev_run_wake(pdev)) pm_runtime_put_noidle(&pdev->dev); return 0; err_register: if (!(adapter->flags & FLAG_HAS_AMT)) e1000e_release_hw_control(adapter); err_eeprom: if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw)) e1000_phy_hw_reset(&adapter->hw); err_hw_init: kfree(adapter->tx_ring); kfree(adapter->rx_ring); err_sw_init: if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt)) iounmap(adapter->hw.flash_address); e1000e_reset_interrupt_capability(adapter); err_flashmap: iounmap(adapter->hw.hw_addr); err_ioremap: free_netdev(netdev); err_alloc_etherdev: pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); return err; } /** * e1000_remove - Device Removal Routine * @pdev: PCI device information struct * * e1000_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. 
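 * (Illustrative note, not part of the original kernel-doc: teardown
 * below mirrors probe in reverse -- the PTP clock, timers and work
 * items are stopped first, the netdev is unregistered, control is
 * handed back to the firmware, and only then are the interrupt
 * resources, rings and BAR mappings released.)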
This could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. **/ static void e1000_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); e1000e_ptp_remove(adapter); /* The timers may be rescheduled, so explicitly disable them * from being rescheduled. */ set_bit(__E1000_DOWN, &adapter->state); del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); cancel_work_sync(&adapter->reset_task); cancel_work_sync(&adapter->watchdog_task); cancel_work_sync(&adapter->downshift_task); cancel_work_sync(&adapter->update_phy_task); cancel_work_sync(&adapter->print_hang_task); if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { cancel_work_sync(&adapter->tx_hwtstamp_work); if (adapter->tx_hwtstamp_skb) { dev_consume_skb_any(adapter->tx_hwtstamp_skb); adapter->tx_hwtstamp_skb = NULL; } } unregister_netdev(netdev); if (pci_dev_run_wake(pdev)) pm_runtime_get_noresume(&pdev->dev); /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. */ e1000e_release_hw_control(adapter); e1000e_reset_interrupt_capability(adapter); kfree(adapter->tx_ring); kfree(adapter->rx_ring); iounmap(adapter->hw.hw_addr); if ((adapter->hw.flash_address) && (adapter->hw.mac.type < e1000_pch_spt)) iounmap(adapter->hw.flash_address); pci_release_mem_regions(pdev); free_netdev(netdev); pci_disable_device(pdev); } /* PCI Error Recovery (ERS) */ static const struct pci_error_handlers e1000_err_handler = { .error_detected = e1000_io_error_detected, .slot_reset = e1000_io_slot_reset, .resume = e1000_io_resume, }; static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), board_80003es2lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), board_80003es2lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT), board_80003es2lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), board_80003es2lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), 
board_ich8lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LBG_I219_LM3), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM4), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V4), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM5), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V5), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM6), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V6), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM7), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V7), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM8), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V8), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM10), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V10), board_pch_cnp }, { PCI_VDEVICE(INTEL, 
E1000_DEV_ID_PCH_CMP_I219_LM11), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_adp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_adp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_adp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_adp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_adp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ARL_I219_LM24), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ARL_I219_V24), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM25), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V25), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM26), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V26), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM27), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V27), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_NVL_I219_LM29), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_NVL_I219_V29), board_pch_mtp }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); static const struct dev_pm_ops e1000_pm_ops = { #ifdef CONFIG_PM_SLEEP .prepare = e1000e_pm_prepare, .suspend = e1000e_pm_suspend, .resume = e1000e_pm_resume, .freeze = e1000e_pm_freeze, .thaw = e1000e_pm_thaw, .poweroff = e1000e_pm_suspend, .restore = e1000e_pm_resume, #endif SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume, e1000e_pm_runtime_idle) }; /* PCI Device API Driver */ static struct pci_driver e1000_driver = { .name = e1000e_driver_name, .id_table = e1000_pci_tbl, .probe = e1000_probe, .remove = e1000_remove, .driver = { .pm = &e1000_pm_ops, }, .shutdown = e1000_shutdown, .err_handler = &e1000_err_handler }; /** * e1000_init_module - Driver Registration Routine * * e1000_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. 
**/ static int __init e1000_init_module(void) { pr_info("Intel(R) PRO/1000 Network Driver\n"); pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n"); return pci_register_driver(&e1000_driver); } module_init(e1000_init_module); /** * e1000_exit_module - Driver Exit Cleanup Routine * * e1000_exit_module is called just before the driver is removed * from memory. **/ static void __exit e1000_exit_module(void) { pci_unregister_driver(&e1000_driver); } module_exit(e1000_exit_module); MODULE_AUTHOR("Intel Corporation, <[email protected]>"); MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); MODULE_LICENSE("GPL v2"); /* netdev.c */
linux-master
drivers/net/ethernet/intel/e1000e/netdev.c
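Annotation (not part of the source above): the probe error path in the e1000e code (err_register through err_dma) unwinds with a ladder of goto labels so that each failure point releases only the resources acquired before it, falling through the remaining labels on the way out. The following is a minimal, self-contained sketch of that pattern; the step_a/step_b/step_c helpers, their comments, and the messages are purely illustrative and are not functions of the driver.

#include <stdbool.h>
#include <stdio.h>

static bool step_a(void) { return true; }	/* e.g. map device registers */
static bool step_b(void) { return true; }	/* e.g. allocate rings */
static bool step_c(void) { return false; }	/* e.g. register netdev; fails here */

static int demo_probe(void)
{
	if (!step_a())
		goto err_a;
	if (!step_b())
		goto err_b;
	if (!step_c())
		goto err_c;
	return 0;

err_c:				/* step_c failed: release what step_b set up */
	puts("release resources from step_b");
err_b:				/* fall through: release what step_a set up */
	puts("release resources from step_a");
err_a:
	return -1;
}

int main(void)
{
	return demo_probe() ? 1 : 0;
}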
/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver * * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) * * This program is dual-licensed; you may select either version 2 of * the GNU General Public License ("GPL") or BSD license ("BSD"). * * This Synopsys DWC XLGMAC software driver and associated documentation * (hereinafter the "Software") is an unsupported proprietary work of * Synopsys, Inc. unless otherwise expressly agreed to in writing between * Synopsys and you. The Software IS NOT an item of Licensed Software or a * Licensed Product under any End User Software License Agreement or * Agreement for Licensed Products with Synopsys or any supplement thereto. * Synopsys is a registered trademark of Synopsys, Inc. Other names included * in the SOFTWARE may be the trademarks of their respective owners. */ #include <linux/netdevice.h> #include <linux/tcp.h> #include <linux/interrupt.h> #include "dwc-xlgmac.h" #include "dwc-xlgmac-reg.h" static int xlgmac_one_poll(struct napi_struct *, int); static int xlgmac_all_poll(struct napi_struct *, int); static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring) { return (ring->dma_desc_count - (ring->cur - ring->dirty)); } static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring) { return (ring->cur - ring->dirty); } static int xlgmac_maybe_stop_tx_queue( struct xlgmac_channel *channel, struct xlgmac_ring *ring, unsigned int count) { struct xlgmac_pdata *pdata = channel->pdata; if (count > xlgmac_tx_avail_desc(ring)) { netif_info(pdata, drv, pdata->netdev, "Tx queue stopped, not enough descriptors available\n"); netif_stop_subqueue(pdata->netdev, channel->queue_index); ring->tx.queue_stopped = 1; /* If we haven't notified the hardware because of xmit_more * support, tell it now */ if (ring->tx.xmit_more) pdata->hw_ops.tx_start_xmit(channel, ring); return NETDEV_TX_BUSY; } return 0; } static void xlgmac_prep_vlan(struct sk_buff *skb, struct xlgmac_pkt_info *pkt_info) { if (skb_vlan_tag_present(skb)) pkt_info->vlan_ctag = skb_vlan_tag_get(skb); } static int xlgmac_prep_tso(struct sk_buff *skb, struct xlgmac_pkt_info *pkt_info) { int ret; if (!XLGMAC_GET_REG_BITS(pkt_info->attributes, TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN)) return 0; ret = skb_cow_head(skb, 0); if (ret) return ret; pkt_info->header_len = skb_tcp_all_headers(skb); pkt_info->tcp_header_len = tcp_hdrlen(skb); pkt_info->tcp_payload_len = skb->len - pkt_info->header_len; pkt_info->mss = skb_shinfo(skb)->gso_size; XLGMAC_PR("header_len=%u\n", pkt_info->header_len); XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n", pkt_info->tcp_header_len, pkt_info->tcp_payload_len); XLGMAC_PR("mss=%u\n", pkt_info->mss); /* Update the number of packets that will ultimately be transmitted * along with the extra bytes for each extra packet */ pkt_info->tx_packets = skb_shinfo(skb)->gso_segs; pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len; return 0; } static int xlgmac_is_tso(struct sk_buff *skb) { if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; if (!skb_is_gso(skb)) return 0; return 1; } static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata, struct xlgmac_ring *ring, struct sk_buff *skb, struct xlgmac_pkt_info *pkt_info) { skb_frag_t *frag; unsigned int context_desc; unsigned int len; unsigned int i; pkt_info->skb = skb; context_desc = 0; pkt_info->desc_count = 0; pkt_info->tx_packets = 1; pkt_info->tx_bytes = skb->len; if (xlgmac_is_tso(skb)) { /* TSO requires an extra descriptor if mss is 
different */ if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) { context_desc = 1; pkt_info->desc_count++; } /* TSO requires an extra descriptor for TSO header */ pkt_info->desc_count++; pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN, 1); pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN, 1); } else if (skb->ip_summed == CHECKSUM_PARTIAL) pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN, 1); if (skb_vlan_tag_present(skb)) { /* VLAN requires an extra descriptor if tag is different */ if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) /* We can share with the TSO context descriptor */ if (!context_desc) { context_desc = 1; pkt_info->desc_count++; } pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN, 1); } for (len = skb_headlen(skb); len;) { pkt_info->desc_count++; len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE); } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; for (len = skb_frag_size(frag); len; ) { pkt_info->desc_count++; len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE); } } } static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) { unsigned int rx_buf_size; if (mtu > XLGMAC_JUMBO_PACKET_MTU) { netdev_alert(netdev, "MTU exceeds maximum supported value\n"); return -EINVAL; } rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE); rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) & ~(XLGMAC_RX_BUF_ALIGN - 1); return rx_buf_size; } static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata) { struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; struct xlgmac_channel *channel; enum xlgmac_int int_id; unsigned int i; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (channel->tx_ring && channel->rx_ring) int_id = XLGMAC_INT_DMA_CH_SR_TI_RI; else if (channel->tx_ring) int_id = XLGMAC_INT_DMA_CH_SR_TI; else if (channel->rx_ring) int_id = XLGMAC_INT_DMA_CH_SR_RI; else continue; hw_ops->enable_int(channel, int_id); } } static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata) { struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; struct xlgmac_channel *channel; enum xlgmac_int int_id; unsigned int i; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (channel->tx_ring && channel->rx_ring) int_id = XLGMAC_INT_DMA_CH_SR_TI_RI; else if (channel->tx_ring) int_id = XLGMAC_INT_DMA_CH_SR_TI; else if (channel->rx_ring) int_id = XLGMAC_INT_DMA_CH_SR_RI; else continue; hw_ops->disable_int(channel, int_id); } } static irqreturn_t xlgmac_isr(int irq, void *data) { unsigned int dma_isr, dma_ch_isr, mac_isr; struct xlgmac_pdata *pdata = data; struct xlgmac_channel *channel; struct xlgmac_hw_ops *hw_ops; unsigned int i, ti, ri; hw_ops = &pdata->hw_ops; /* The DMA interrupt status register also reports MAC and MTL * interrupts. 
So for polling mode, we just need to check for * this register to be non-zero */ dma_isr = readl(pdata->mac_regs + DMA_ISR); if (!dma_isr) return IRQ_HANDLED; netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr); for (i = 0; i < pdata->channel_count; i++) { if (!(dma_isr & (1 << i))) continue; channel = pdata->channel_head + i; dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR)); netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n", i, dma_ch_isr); /* The TI or RI interrupt bits may still be set even if using * per channel DMA interrupts. Check to be sure those are not * enabled before using the private data napi structure. */ ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS, DMA_CH_SR_TI_LEN); ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS, DMA_CH_SR_RI_LEN); if (!pdata->per_channel_irq && (ti || ri)) { if (napi_schedule_prep(&pdata->napi)) { /* Disable Tx and Rx interrupts */ xlgmac_disable_rx_tx_ints(pdata); pdata->stats.napi_poll_isr++; /* Turn on polling */ __napi_schedule_irqoff(&pdata->napi); } } if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS, DMA_CH_SR_TPS_LEN)) pdata->stats.tx_process_stopped++; if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS, DMA_CH_SR_RPS_LEN)) pdata->stats.rx_process_stopped++; if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS, DMA_CH_SR_TBU_LEN)) pdata->stats.tx_buffer_unavailable++; if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS, DMA_CH_SR_RBU_LEN)) pdata->stats.rx_buffer_unavailable++; /* Restart the device on a Fatal Bus Error */ if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS, DMA_CH_SR_FBE_LEN)) { pdata->stats.fatal_bus_error++; schedule_work(&pdata->restart_work); } /* Clear all interrupt signals */ writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR)); } if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS, DMA_ISR_MACIS_LEN)) { mac_isr = readl(pdata->mac_regs + MAC_ISR); if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS, MAC_ISR_MMCTXIS_LEN)) hw_ops->tx_mmc_int(pdata); if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS, MAC_ISR_MMCRXIS_LEN)) hw_ops->rx_mmc_int(pdata); } return IRQ_HANDLED; } static irqreturn_t xlgmac_dma_isr(int irq, void *data) { struct xlgmac_channel *channel = data; /* Per channel DMA interrupts are enabled, so we use the per * channel napi structure and not the private data napi structure */ if (napi_schedule_prep(&channel->napi)) { /* Disable Tx and Rx interrupts */ disable_irq_nosync(channel->dma_irq); /* Turn on polling */ __napi_schedule_irqoff(&channel->napi); } return IRQ_HANDLED; } static void xlgmac_tx_timer(struct timer_list *t) { struct xlgmac_channel *channel = from_timer(channel, t, tx_timer); struct xlgmac_pdata *pdata = channel->pdata; struct napi_struct *napi; napi = (pdata->per_channel_irq) ? 
&channel->napi : &pdata->napi; if (napi_schedule_prep(napi)) { /* Disable Tx and Rx interrupts */ if (pdata->per_channel_irq) disable_irq_nosync(channel->dma_irq); else xlgmac_disable_rx_tx_ints(pdata); pdata->stats.napi_poll_txtimer++; /* Turn on polling */ __napi_schedule(napi); } channel->tx_timer_active = 0; } static void xlgmac_init_timers(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) break; timer_setup(&channel->tx_timer, xlgmac_tx_timer, 0); } } static void xlgmac_stop_timers(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) break; del_timer_sync(&channel->tx_timer); } } static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add) { struct xlgmac_channel *channel; unsigned int i; if (pdata->per_channel_irq) { channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (add) netif_napi_add(pdata->netdev, &channel->napi, xlgmac_one_poll); napi_enable(&channel->napi); } } else { if (add) netif_napi_add(pdata->netdev, &pdata->napi, xlgmac_all_poll); napi_enable(&pdata->napi); } } static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del) { struct xlgmac_channel *channel; unsigned int i; if (pdata->per_channel_irq) { channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { napi_disable(&channel->napi); if (del) netif_napi_del(&channel->napi); } } else { napi_disable(&pdata->napi); if (del) netif_napi_del(&pdata->napi); } } static int xlgmac_request_irqs(struct xlgmac_pdata *pdata) { struct net_device *netdev = pdata->netdev; struct xlgmac_channel *channel; unsigned int i; int ret; ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr, IRQF_SHARED, netdev->name, pdata); if (ret) { netdev_alert(netdev, "error requesting irq %d\n", pdata->dev_irq); return ret; } if (!pdata->per_channel_irq) return 0; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { snprintf(channel->dma_irq_name, sizeof(channel->dma_irq_name) - 1, "%s-TxRx-%u", netdev_name(netdev), channel->queue_index); ret = devm_request_irq(pdata->dev, channel->dma_irq, xlgmac_dma_isr, 0, channel->dma_irq_name, channel); if (ret) { netdev_alert(netdev, "error requesting irq %d\n", channel->dma_irq); goto err_irq; } } return 0; err_irq: /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ for (i--, channel--; i < pdata->channel_count; i--, channel--) devm_free_irq(pdata->dev, channel->dma_irq, channel); devm_free_irq(pdata->dev, pdata->dev_irq, pdata); return ret; } static void xlgmac_free_irqs(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; devm_free_irq(pdata->dev, pdata->dev_irq, pdata); if (!pdata->per_channel_irq) return; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) devm_free_irq(pdata->dev, channel->dma_irq, channel); } static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata) { struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops; struct xlgmac_desc_data *desc_data; struct xlgmac_channel *channel; struct xlgmac_ring *ring; unsigned int i, j; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { ring = channel->tx_ring; if (!ring) break; for (j = 0; j < ring->dma_desc_count; j++) { desc_data = 
XLGMAC_GET_DESC_DATA(ring, j); desc_ops->unmap_desc_data(pdata, desc_data); } } } static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata) { struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops; struct xlgmac_desc_data *desc_data; struct xlgmac_channel *channel; struct xlgmac_ring *ring; unsigned int i, j; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { ring = channel->rx_ring; if (!ring) break; for (j = 0; j < ring->dma_desc_count; j++) { desc_data = XLGMAC_GET_DESC_DATA(ring, j); desc_ops->unmap_desc_data(pdata, desc_data); } } } static int xlgmac_start(struct xlgmac_pdata *pdata) { struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; struct net_device *netdev = pdata->netdev; int ret; hw_ops->init(pdata); xlgmac_napi_enable(pdata, 1); ret = xlgmac_request_irqs(pdata); if (ret) goto err_napi; hw_ops->enable_tx(pdata); hw_ops->enable_rx(pdata); netif_tx_start_all_queues(netdev); return 0; err_napi: xlgmac_napi_disable(pdata, 1); hw_ops->exit(pdata); return ret; } static void xlgmac_stop(struct xlgmac_pdata *pdata) { struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; struct net_device *netdev = pdata->netdev; struct xlgmac_channel *channel; struct netdev_queue *txq; unsigned int i; netif_tx_stop_all_queues(netdev); xlgmac_stop_timers(pdata); hw_ops->disable_tx(pdata); hw_ops->disable_rx(pdata); xlgmac_free_irqs(pdata); xlgmac_napi_disable(pdata, 1); hw_ops->exit(pdata); channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) continue; txq = netdev_get_tx_queue(netdev, channel->queue_index); netdev_tx_reset_queue(txq); } } static void xlgmac_restart_dev(struct xlgmac_pdata *pdata) { /* If not running, "restart" will happen on open */ if (!netif_running(pdata->netdev)) return; xlgmac_stop(pdata); xlgmac_free_tx_data(pdata); xlgmac_free_rx_data(pdata); xlgmac_start(pdata); } static void xlgmac_restart(struct work_struct *work) { struct xlgmac_pdata *pdata = container_of(work, struct xlgmac_pdata, restart_work); rtnl_lock(); xlgmac_restart_dev(pdata); rtnl_unlock(); } static int xlgmac_open(struct net_device *netdev) { struct xlgmac_pdata *pdata = netdev_priv(netdev); struct xlgmac_desc_ops *desc_ops; int ret; desc_ops = &pdata->desc_ops; /* TODO: Initialize the phy */ /* Calculate the Rx buffer size before allocating rings */ ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu); if (ret < 0) return ret; pdata->rx_buf_size = ret; /* Allocate the channels and rings */ ret = desc_ops->alloc_channels_and_rings(pdata); if (ret) return ret; INIT_WORK(&pdata->restart_work, xlgmac_restart); xlgmac_init_timers(pdata); ret = xlgmac_start(pdata); if (ret) goto err_channels_and_rings; return 0; err_channels_and_rings: desc_ops->free_channels_and_rings(pdata); return ret; } static int xlgmac_close(struct net_device *netdev) { struct xlgmac_pdata *pdata = netdev_priv(netdev); struct xlgmac_desc_ops *desc_ops; desc_ops = &pdata->desc_ops; /* Stop the device */ xlgmac_stop(pdata); /* Free the channels and rings */ desc_ops->free_channels_and_rings(pdata); return 0; } static void xlgmac_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct xlgmac_pdata *pdata = netdev_priv(netdev); netdev_warn(netdev, "tx timeout, device restarting\n"); schedule_work(&pdata->restart_work); } static netdev_tx_t xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev) { struct xlgmac_pdata *pdata = netdev_priv(netdev); struct xlgmac_pkt_info *tx_pkt_info; struct xlgmac_desc_ops *desc_ops; struct xlgmac_channel *channel; struct 
xlgmac_hw_ops *hw_ops; struct netdev_queue *txq; struct xlgmac_ring *ring; int ret; desc_ops = &pdata->desc_ops; hw_ops = &pdata->hw_ops; XLGMAC_PR("skb->len = %d\n", skb->len); channel = pdata->channel_head + skb->queue_mapping; txq = netdev_get_tx_queue(netdev, channel->queue_index); ring = channel->tx_ring; tx_pkt_info = &ring->pkt_info; if (skb->len == 0) { netif_err(pdata, tx_err, netdev, "empty skb received from stack\n"); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } /* Prepare preliminary packet info for TX */ memset(tx_pkt_info, 0, sizeof(*tx_pkt_info)); xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info); /* Check that there are enough descriptors available */ ret = xlgmac_maybe_stop_tx_queue(channel, ring, tx_pkt_info->desc_count); if (ret) return ret; ret = xlgmac_prep_tso(skb, tx_pkt_info); if (ret) { netif_err(pdata, tx_err, netdev, "error processing TSO packet\n"); dev_kfree_skb_any(skb); return ret; } xlgmac_prep_vlan(skb, tx_pkt_info); if (!desc_ops->map_tx_skb(channel, skb)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } /* Report on the actual number of bytes (to be) sent */ netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes); /* Configure required descriptor fields for transmission */ hw_ops->dev_xmit(channel); if (netif_msg_pktdata(pdata)) xlgmac_print_pkt(netdev, skb, true); /* Stop the queue in advance if there may not be enough descriptors */ xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR); return NETDEV_TX_OK; } static void xlgmac_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *s) { struct xlgmac_pdata *pdata = netdev_priv(netdev); struct xlgmac_stats *pstats = &pdata->stats; pdata->hw_ops.read_mmc_stats(pdata); s->rx_packets = pstats->rxframecount_gb; s->rx_bytes = pstats->rxoctetcount_gb; s->rx_errors = pstats->rxframecount_gb - pstats->rxbroadcastframes_g - pstats->rxmulticastframes_g - pstats->rxunicastframes_g; s->multicast = pstats->rxmulticastframes_g; s->rx_length_errors = pstats->rxlengtherror; s->rx_crc_errors = pstats->rxcrcerror; s->rx_fifo_errors = pstats->rxfifooverflow; s->tx_packets = pstats->txframecount_gb; s->tx_bytes = pstats->txoctetcount_gb; s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g; s->tx_dropped = netdev->stats.tx_dropped; } static int xlgmac_set_mac_address(struct net_device *netdev, void *addr) { struct xlgmac_pdata *pdata = netdev_priv(netdev); struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; struct sockaddr *saddr = addr; if (!is_valid_ether_addr(saddr->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(netdev, saddr->sa_data); hw_ops->set_mac_address(pdata, netdev->dev_addr); return 0; } static int xlgmac_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd) { if (!netif_running(netdev)) return -ENODEV; return 0; } static int xlgmac_change_mtu(struct net_device *netdev, int mtu) { struct xlgmac_pdata *pdata = netdev_priv(netdev); int ret; ret = xlgmac_calc_rx_buf_size(netdev, mtu); if (ret < 0) return ret; pdata->rx_buf_size = ret; netdev->mtu = mtu; xlgmac_restart_dev(pdata); return 0; } static int xlgmac_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct xlgmac_pdata *pdata = netdev_priv(netdev); struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; set_bit(vid, pdata->active_vlans); hw_ops->update_vlan_hash_table(pdata); return 0; } static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct xlgmac_pdata *pdata = netdev_priv(netdev); struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; clear_bit(vid, pdata->active_vlans); 
hw_ops->update_vlan_hash_table(pdata); return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void xlgmac_poll_controller(struct net_device *netdev) { struct xlgmac_pdata *pdata = netdev_priv(netdev); struct xlgmac_channel *channel; unsigned int i; if (pdata->per_channel_irq) { channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) xlgmac_dma_isr(channel->dma_irq, channel); } else { disable_irq(pdata->dev_irq); xlgmac_isr(pdata->dev_irq, pdata); enable_irq(pdata->dev_irq); } } #endif /* CONFIG_NET_POLL_CONTROLLER */ static int xlgmac_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter; struct xlgmac_pdata *pdata = netdev_priv(netdev); struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; int ret = 0; rxhash = pdata->netdev_features & NETIF_F_RXHASH; rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER; if ((features & NETIF_F_RXHASH) && !rxhash) ret = hw_ops->enable_rss(pdata); else if (!(features & NETIF_F_RXHASH) && rxhash) ret = hw_ops->disable_rss(pdata); if (ret) return ret; if ((features & NETIF_F_RXCSUM) && !rxcsum) hw_ops->enable_rx_csum(pdata); else if (!(features & NETIF_F_RXCSUM) && rxcsum) hw_ops->disable_rx_csum(pdata); if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan) hw_ops->enable_rx_vlan_stripping(pdata); else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan) hw_ops->disable_rx_vlan_stripping(pdata); if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter) hw_ops->enable_rx_vlan_filtering(pdata); else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter) hw_ops->disable_rx_vlan_filtering(pdata); pdata->netdev_features = features; return 0; } static void xlgmac_set_rx_mode(struct net_device *netdev) { struct xlgmac_pdata *pdata = netdev_priv(netdev); struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; hw_ops->config_rx_mode(pdata); } static const struct net_device_ops xlgmac_netdev_ops = { .ndo_open = xlgmac_open, .ndo_stop = xlgmac_close, .ndo_start_xmit = xlgmac_xmit, .ndo_tx_timeout = xlgmac_tx_timeout, .ndo_get_stats64 = xlgmac_get_stats64, .ndo_change_mtu = xlgmac_change_mtu, .ndo_set_mac_address = xlgmac_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_eth_ioctl = xlgmac_ioctl, .ndo_vlan_rx_add_vid = xlgmac_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = xlgmac_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xlgmac_poll_controller, #endif .ndo_set_features = xlgmac_set_features, .ndo_set_rx_mode = xlgmac_set_rx_mode, }; const struct net_device_ops *xlgmac_get_netdev_ops(void) { return &xlgmac_netdev_ops; } static void xlgmac_rx_refresh(struct xlgmac_channel *channel) { struct xlgmac_pdata *pdata = channel->pdata; struct xlgmac_ring *ring = channel->rx_ring; struct xlgmac_desc_data *desc_data; struct xlgmac_desc_ops *desc_ops; struct xlgmac_hw_ops *hw_ops; desc_ops = &pdata->desc_ops; hw_ops = &pdata->hw_ops; while (ring->dirty != ring->cur) { desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty); /* Reset desc_data values */ desc_ops->unmap_desc_data(pdata, desc_data); if (desc_ops->map_rx_buffer(pdata, ring, desc_data)) break; hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty); ring->dirty++; } /* Make sure everything is written before the register write */ wmb(); /* Update the Rx Tail Pointer Register with address of * the last cleaned entry */ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1); 
writel(lower_32_bits(desc_data->dma_desc_addr), XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO)); } static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata, struct napi_struct *napi, struct xlgmac_desc_data *desc_data, unsigned int len) { unsigned int copy_len; struct sk_buff *skb; u8 *packet; skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len); if (!skb) return NULL; /* Start with the header buffer which may contain just the header * or the header plus data */ dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base, desc_data->rx.hdr.dma_off, desc_data->rx.hdr.dma_len, DMA_FROM_DEVICE); packet = page_address(desc_data->rx.hdr.pa.pages) + desc_data->rx.hdr.pa.pages_offset; copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len; copy_len = min(desc_data->rx.hdr.dma_len, copy_len); skb_copy_to_linear_data(skb, packet, copy_len); skb_put(skb, copy_len); len -= copy_len; if (len) { /* Add the remaining data as a frag */ dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.buf.dma_base, desc_data->rx.buf.dma_off, desc_data->rx.buf.dma_len, DMA_FROM_DEVICE); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, desc_data->rx.buf.pa.pages, desc_data->rx.buf.pa.pages_offset, len, desc_data->rx.buf.dma_len); desc_data->rx.buf.pa.pages = NULL; } return skb; } static int xlgmac_tx_poll(struct xlgmac_channel *channel) { struct xlgmac_pdata *pdata = channel->pdata; struct xlgmac_ring *ring = channel->tx_ring; struct net_device *netdev = pdata->netdev; unsigned int tx_packets = 0, tx_bytes = 0; struct xlgmac_desc_data *desc_data; struct xlgmac_dma_desc *dma_desc; struct xlgmac_desc_ops *desc_ops; struct xlgmac_hw_ops *hw_ops; struct netdev_queue *txq; int processed = 0; unsigned int cur; desc_ops = &pdata->desc_ops; hw_ops = &pdata->hw_ops; /* Nothing to do if there isn't a Tx ring for this channel */ if (!ring) return 0; cur = ring->cur; /* Be sure we get ring->cur before accessing descriptor data */ smp_rmb(); txq = netdev_get_tx_queue(netdev, channel->queue_index); while ((processed < XLGMAC_TX_DESC_MAX_PROC) && (ring->dirty != cur)) { desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty); dma_desc = desc_data->dma_desc; if (!hw_ops->tx_complete(dma_desc)) break; /* Make sure descriptor fields are read after reading * the OWN bit */ dma_rmb(); if (netif_msg_tx_done(pdata)) xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0); if (hw_ops->is_last_desc(dma_desc)) { tx_packets += desc_data->tx.packets; tx_bytes += desc_data->tx.bytes; } /* Free the SKB and reset the descriptor for re-use */ desc_ops->unmap_desc_data(pdata, desc_data); hw_ops->tx_desc_reset(desc_data); processed++; ring->dirty++; } if (!processed) return 0; netdev_tx_completed_queue(txq, tx_packets, tx_bytes); if ((ring->tx.queue_stopped == 1) && (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) { ring->tx.queue_stopped = 0; netif_tx_wake_queue(txq); } XLGMAC_PR("processed=%d\n", processed); return processed; } static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget) { struct xlgmac_pdata *pdata = channel->pdata; struct xlgmac_ring *ring = channel->rx_ring; struct net_device *netdev = pdata->netdev; unsigned int len, dma_desc_len, max_len; unsigned int context_next, context; struct xlgmac_desc_data *desc_data; struct xlgmac_pkt_info *pkt_info; unsigned int incomplete, error; struct xlgmac_hw_ops *hw_ops; unsigned int received = 0; struct napi_struct *napi; struct sk_buff *skb; int packet_count = 0; hw_ops = &pdata->hw_ops; /* Nothing to do if there isn't a Rx ring for this channel */ if (!ring) 
return 0; incomplete = 0; context_next = 0; napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur); pkt_info = &ring->pkt_info; while (packet_count < budget) { /* First time in loop see if we need to restore state */ if (!received && desc_data->state_saved) { skb = desc_data->state.skb; error = desc_data->state.error; len = desc_data->state.len; } else { memset(pkt_info, 0, sizeof(*pkt_info)); skb = NULL; error = 0; len = 0; } read_again: desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur); if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY) xlgmac_rx_refresh(channel); if (hw_ops->dev_read(channel)) break; received++; ring->cur++; incomplete = XLGMAC_GET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN); context_next = XLGMAC_GET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS, RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN); context = XLGMAC_GET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_POS, RX_PACKET_ATTRIBUTES_CONTEXT_LEN); /* Earlier error, just drain the remaining data */ if ((incomplete || context_next) && error) goto read_again; if (error || pkt_info->errors) { if (pkt_info->errors) netif_err(pdata, rx_err, netdev, "error in received packet\n"); dev_kfree_skb(skb); goto next_packet; } if (!context) { /* Length is cumulative, get this descriptor's length */ dma_desc_len = desc_data->rx.len - len; len += dma_desc_len; if (dma_desc_len && !skb) { skb = xlgmac_create_skb(pdata, napi, desc_data, dma_desc_len); if (!skb) error = 1; } else if (dma_desc_len) { dma_sync_single_range_for_cpu( pdata->dev, desc_data->rx.buf.dma_base, desc_data->rx.buf.dma_off, desc_data->rx.buf.dma_len, DMA_FROM_DEVICE); skb_add_rx_frag( skb, skb_shinfo(skb)->nr_frags, desc_data->rx.buf.pa.pages, desc_data->rx.buf.pa.pages_offset, dma_desc_len, desc_data->rx.buf.dma_len); desc_data->rx.buf.pa.pages = NULL; } } if (incomplete || context_next) goto read_again; if (!skb) goto next_packet; /* Be sure we don't exceed the configured MTU */ max_len = netdev->mtu + ETH_HLEN; if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && (skb->protocol == htons(ETH_P_8021Q))) max_len += VLAN_HLEN; if (skb->len > max_len) { netif_err(pdata, rx_err, netdev, "packet length exceeds configured MTU\n"); dev_kfree_skb(skb); goto next_packet; } if (netif_msg_pktdata(pdata)) xlgmac_print_pkt(netdev, skb, false); skb_checksum_none_assert(skb); if (XLGMAC_GET_REG_BITS(pkt_info->attributes, RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN)) skb->ip_summed = CHECKSUM_UNNECESSARY; if (XLGMAC_GET_REG_BITS(pkt_info->attributes, RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) { __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pkt_info->vlan_ctag); pdata->stats.rx_vlan_packets++; } if (XLGMAC_GET_REG_BITS(pkt_info->attributes, RX_PACKET_ATTRIBUTES_RSS_HASH_POS, RX_PACKET_ATTRIBUTES_RSS_HASH_LEN)) skb_set_hash(skb, pkt_info->rss_hash, pkt_info->rss_hash_type); skb->dev = netdev; skb->protocol = eth_type_trans(skb, netdev); skb_record_rx_queue(skb, channel->queue_index); napi_gro_receive(napi, skb); next_packet: packet_count++; } /* Check if we need to save state before leaving */ if (received && (incomplete || context_next)) { desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur); desc_data->state_saved = 1; desc_data->state.skb = skb; desc_data->state.len = len; desc_data->state.error = error; } XLGMAC_PR("packet_count = %d\n", packet_count); return 
packet_count; } static int xlgmac_one_poll(struct napi_struct *napi, int budget) { struct xlgmac_channel *channel = container_of(napi, struct xlgmac_channel, napi); int processed = 0; XLGMAC_PR("budget=%d\n", budget); /* Cleanup Tx ring first */ xlgmac_tx_poll(channel); /* Process Rx ring next */ processed = xlgmac_rx_poll(channel, budget); /* If we processed everything, we are done */ if (processed < budget) { /* Turn off polling */ napi_complete_done(napi, processed); /* Enable Tx and Rx interrupts */ enable_irq(channel->dma_irq); } XLGMAC_PR("received = %d\n", processed); return processed; } static int xlgmac_all_poll(struct napi_struct *napi, int budget) { struct xlgmac_pdata *pdata = container_of(napi, struct xlgmac_pdata, napi); struct xlgmac_channel *channel; int processed, last_processed; int ring_budget; unsigned int i; XLGMAC_PR("budget=%d\n", budget); processed = 0; ring_budget = budget / pdata->rx_ring_count; do { last_processed = processed; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { /* Cleanup Tx ring first */ xlgmac_tx_poll(channel); /* Process Rx ring next */ if (ring_budget > (budget - processed)) ring_budget = budget - processed; processed += xlgmac_rx_poll(channel, ring_budget); } } while ((processed < budget) && (processed != last_processed)); /* If we processed everything, we are done */ if (processed < budget) { /* Turn off polling */ napi_complete_done(napi, processed); /* Enable Tx and Rx interrupts */ xlgmac_enable_rx_tx_ints(pdata); } XLGMAC_PR("received = %d\n", processed); return processed; }
linux-master
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
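Annotation (not part of the source above): xlgmac_tx_avail_desc() and xlgmac_rx_dirty_desc() rely on free-running unsigned cur/dirty ring counters. With modulo-2^32 unsigned arithmetic, cur - dirty gives the number of in-flight descriptors even after the counters wrap, and the ring size minus that difference gives the free space. The sketch below is illustrative only: RING_SIZE and the explicit index mask are assumptions for the demo (the driver's XLGMAC_GET_DESC_DATA lookup presumably reduces the raw counter modulo the descriptor count itself).

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8u	/* assumed power of two so the index mask below works */

struct demo_ring {
	unsigned int cur;	/* next slot the producer writes */
	unsigned int dirty;	/* next slot the consumer reclaims */
	int slot[RING_SIZE];
};

static unsigned int demo_in_flight(const struct demo_ring *r)
{
	return r->cur - r->dirty;		/* wrap-safe unsigned difference */
}

static unsigned int demo_avail(const struct demo_ring *r)
{
	return RING_SIZE - demo_in_flight(r);
}

int main(void)
{
	struct demo_ring r = { .cur = 0xfffffffeu, .dirty = 0xfffffffcu };

	/* Two entries are outstanding even though cur is about to wrap. */
	assert(demo_in_flight(&r) == 2);
	assert(demo_avail(&r) == RING_SIZE - 2);

	r.slot[r.cur & (RING_SIZE - 1)] = 42;	/* produce one more entry */
	r.cur++;
	printf("in flight: %u, free: %u\n", demo_in_flight(&r), demo_avail(&r));
	return 0;
}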
/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver * * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) * * This program is dual-licensed; you may select either version 2 of * the GNU General Public License ("GPL") or BSD license ("BSD"). * * This Synopsys DWC XLGMAC software driver and associated documentation * (hereinafter the "Software") is an unsupported proprietary work of * Synopsys, Inc. unless otherwise expressly agreed to in writing between * Synopsys and you. The Software IS NOT an item of Licensed Software or a * Licensed Product under any End User Software License Agreement or * Agreement for Licensed Products with Synopsys or any supplement thereto. * Synopsys is a registered trademark of Synopsys, Inc. Other names included * in the SOFTWARE may be the trademarks of their respective owners. */ #include "dwc-xlgmac.h" #include "dwc-xlgmac-reg.h" static void xlgmac_unmap_desc_data(struct xlgmac_pdata *pdata, struct xlgmac_desc_data *desc_data) { if (desc_data->skb_dma) { if (desc_data->mapped_as_page) { dma_unmap_page(pdata->dev, desc_data->skb_dma, desc_data->skb_dma_len, DMA_TO_DEVICE); } else { dma_unmap_single(pdata->dev, desc_data->skb_dma, desc_data->skb_dma_len, DMA_TO_DEVICE); } desc_data->skb_dma = 0; desc_data->skb_dma_len = 0; } if (desc_data->skb) { dev_kfree_skb_any(desc_data->skb); desc_data->skb = NULL; } if (desc_data->rx.hdr.pa.pages) put_page(desc_data->rx.hdr.pa.pages); if (desc_data->rx.hdr.pa_unmap.pages) { dma_unmap_page(pdata->dev, desc_data->rx.hdr.pa_unmap.pages_dma, desc_data->rx.hdr.pa_unmap.pages_len, DMA_FROM_DEVICE); put_page(desc_data->rx.hdr.pa_unmap.pages); } if (desc_data->rx.buf.pa.pages) put_page(desc_data->rx.buf.pa.pages); if (desc_data->rx.buf.pa_unmap.pages) { dma_unmap_page(pdata->dev, desc_data->rx.buf.pa_unmap.pages_dma, desc_data->rx.buf.pa_unmap.pages_len, DMA_FROM_DEVICE); put_page(desc_data->rx.buf.pa_unmap.pages); } memset(&desc_data->tx, 0, sizeof(desc_data->tx)); memset(&desc_data->rx, 0, sizeof(desc_data->rx)); desc_data->mapped_as_page = 0; if (desc_data->state_saved) { desc_data->state_saved = 0; desc_data->state.skb = NULL; desc_data->state.len = 0; desc_data->state.error = 0; } } static void xlgmac_free_ring(struct xlgmac_pdata *pdata, struct xlgmac_ring *ring) { struct xlgmac_desc_data *desc_data; unsigned int i; if (!ring) return; if (ring->desc_data_head) { for (i = 0; i < ring->dma_desc_count; i++) { desc_data = XLGMAC_GET_DESC_DATA(ring, i); xlgmac_unmap_desc_data(pdata, desc_data); } kfree(ring->desc_data_head); ring->desc_data_head = NULL; } if (ring->rx_hdr_pa.pages) { dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma, ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE); put_page(ring->rx_hdr_pa.pages); ring->rx_hdr_pa.pages = NULL; ring->rx_hdr_pa.pages_len = 0; ring->rx_hdr_pa.pages_offset = 0; ring->rx_hdr_pa.pages_dma = 0; } if (ring->rx_buf_pa.pages) { dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma, ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE); put_page(ring->rx_buf_pa.pages); ring->rx_buf_pa.pages = NULL; ring->rx_buf_pa.pages_len = 0; ring->rx_buf_pa.pages_offset = 0; ring->rx_buf_pa.pages_dma = 0; } if (ring->dma_desc_head) { dma_free_coherent(pdata->dev, (sizeof(struct xlgmac_dma_desc) * ring->dma_desc_count), ring->dma_desc_head, ring->dma_desc_head_addr); ring->dma_desc_head = NULL; } } static int xlgmac_init_ring(struct xlgmac_pdata *pdata, struct xlgmac_ring *ring, unsigned int dma_desc_count) { if (!ring) return 0; /* Descriptors */ ring->dma_desc_count = dma_desc_count; 
ring->dma_desc_head = dma_alloc_coherent(pdata->dev, (sizeof(struct xlgmac_dma_desc) * dma_desc_count), &ring->dma_desc_head_addr, GFP_KERNEL); if (!ring->dma_desc_head) return -ENOMEM; /* Array of descriptor data */ ring->desc_data_head = kcalloc(dma_desc_count, sizeof(struct xlgmac_desc_data), GFP_KERNEL); if (!ring->desc_data_head) return -ENOMEM; netif_dbg(pdata, drv, pdata->netdev, "dma_desc_head=%p, dma_desc_head_addr=%pad, desc_data_head=%p\n", ring->dma_desc_head, &ring->dma_desc_head_addr, ring->desc_data_head); return 0; } static void xlgmac_free_rings(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; if (!pdata->channel_head) return; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { xlgmac_free_ring(pdata, channel->tx_ring); xlgmac_free_ring(pdata, channel->rx_ring); } } static int xlgmac_alloc_rings(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; int ret; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n", channel->name); ret = xlgmac_init_ring(pdata, channel->tx_ring, pdata->tx_desc_count); if (ret) { netdev_alert(pdata->netdev, "error initializing Tx ring"); goto err_init_ring; } netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n", channel->name); ret = xlgmac_init_ring(pdata, channel->rx_ring, pdata->rx_desc_count); if (ret) { netdev_alert(pdata->netdev, "error initializing Rx ring\n"); goto err_init_ring; } } return 0; err_init_ring: xlgmac_free_rings(pdata); return ret; } static void xlgmac_free_channels(struct xlgmac_pdata *pdata) { if (!pdata->channel_head) return; kfree(pdata->channel_head->tx_ring); pdata->channel_head->tx_ring = NULL; kfree(pdata->channel_head->rx_ring); pdata->channel_head->rx_ring = NULL; kfree(pdata->channel_head); pdata->channel_head = NULL; pdata->channel_count = 0; } static int xlgmac_alloc_channels(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel_head, *channel; struct xlgmac_ring *tx_ring, *rx_ring; int ret = -ENOMEM; unsigned int i; channel_head = kcalloc(pdata->channel_count, sizeof(struct xlgmac_channel), GFP_KERNEL); if (!channel_head) return ret; netif_dbg(pdata, drv, pdata->netdev, "channel_head=%p\n", channel_head); tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xlgmac_ring), GFP_KERNEL); if (!tx_ring) goto err_tx_ring; rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xlgmac_ring), GFP_KERNEL); if (!rx_ring) goto err_rx_ring; for (i = 0, channel = channel_head; i < pdata->channel_count; i++, channel++) { snprintf(channel->name, sizeof(channel->name), "channel-%u", i); channel->pdata = pdata; channel->queue_index = i; channel->dma_regs = pdata->mac_regs + DMA_CH_BASE + (DMA_CH_INC * i); if (pdata->per_channel_irq) { /* Get the per DMA interrupt */ ret = pdata->channel_irq[i]; if (ret < 0) { netdev_err(pdata->netdev, "get_irq %u failed\n", i + 1); goto err_irq; } channel->dma_irq = ret; } if (i < pdata->tx_ring_count) channel->tx_ring = tx_ring++; if (i < pdata->rx_ring_count) channel->rx_ring = rx_ring++; netif_dbg(pdata, drv, pdata->netdev, "%s: dma_regs=%p, tx_ring=%p, rx_ring=%p\n", channel->name, channel->dma_regs, channel->tx_ring, channel->rx_ring); } pdata->channel_head = channel_head; return 0; err_irq: kfree(rx_ring); err_rx_ring: kfree(tx_ring); err_tx_ring: kfree(channel_head); return ret; } static void xlgmac_free_channels_and_rings(struct xlgmac_pdata *pdata) { xlgmac_free_rings(pdata); xlgmac_free_channels(pdata); 
} static int xlgmac_alloc_channels_and_rings(struct xlgmac_pdata *pdata) { int ret; ret = xlgmac_alloc_channels(pdata); if (ret) goto err_alloc; ret = xlgmac_alloc_rings(pdata); if (ret) goto err_alloc; return 0; err_alloc: xlgmac_free_channels_and_rings(pdata); return ret; } static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata, struct xlgmac_page_alloc *pa, gfp_t gfp, int order) { struct page *pages = NULL; dma_addr_t pages_dma; /* Try to obtain pages, decreasing order if necessary */ gfp |= __GFP_COMP | __GFP_NOWARN; while (order >= 0) { pages = alloc_pages(gfp, order); if (pages) break; order--; } if (!pages) return -ENOMEM; /* Map the pages */ pages_dma = dma_map_page(pdata->dev, pages, 0, PAGE_SIZE << order, DMA_FROM_DEVICE); if (dma_mapping_error(pdata->dev, pages_dma)) { put_page(pages); return -ENOMEM; } pa->pages = pages; pa->pages_len = PAGE_SIZE << order; pa->pages_offset = 0; pa->pages_dma = pages_dma; return 0; } static void xlgmac_set_buffer_data(struct xlgmac_buffer_data *bd, struct xlgmac_page_alloc *pa, unsigned int len) { get_page(pa->pages); bd->pa = *pa; bd->dma_base = pa->pages_dma; bd->dma_off = pa->pages_offset; bd->dma_len = len; pa->pages_offset += len; if ((pa->pages_offset + len) > pa->pages_len) { /* This data descriptor is responsible for unmapping page(s) */ bd->pa_unmap = *pa; /* Get a new allocation next time */ pa->pages = NULL; pa->pages_len = 0; pa->pages_offset = 0; pa->pages_dma = 0; } } static int xlgmac_map_rx_buffer(struct xlgmac_pdata *pdata, struct xlgmac_ring *ring, struct xlgmac_desc_data *desc_data) { int order, ret; if (!ring->rx_hdr_pa.pages) { ret = xlgmac_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0); if (ret) return ret; } if (!ring->rx_buf_pa.pages) { order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0); ret = xlgmac_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC, order); if (ret) return ret; } /* Set up the header page info */ xlgmac_set_buffer_data(&desc_data->rx.hdr, &ring->rx_hdr_pa, XLGMAC_SKB_ALLOC_SIZE); /* Set up the buffer page info */ xlgmac_set_buffer_data(&desc_data->rx.buf, &ring->rx_buf_pa, pdata->rx_buf_size); return 0; } static void xlgmac_tx_desc_init(struct xlgmac_pdata *pdata) { struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; struct xlgmac_desc_data *desc_data; struct xlgmac_dma_desc *dma_desc; struct xlgmac_channel *channel; struct xlgmac_ring *ring; dma_addr_t dma_desc_addr; unsigned int i, j; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { ring = channel->tx_ring; if (!ring) break; dma_desc = ring->dma_desc_head; dma_desc_addr = ring->dma_desc_head_addr; for (j = 0; j < ring->dma_desc_count; j++) { desc_data = XLGMAC_GET_DESC_DATA(ring, j); desc_data->dma_desc = dma_desc; desc_data->dma_desc_addr = dma_desc_addr; dma_desc++; dma_desc_addr += sizeof(struct xlgmac_dma_desc); } ring->cur = 0; ring->dirty = 0; memset(&ring->tx, 0, sizeof(ring->tx)); hw_ops->tx_desc_init(channel); } } static void xlgmac_rx_desc_init(struct xlgmac_pdata *pdata) { struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; struct xlgmac_desc_data *desc_data; struct xlgmac_dma_desc *dma_desc; struct xlgmac_channel *channel; struct xlgmac_ring *ring; dma_addr_t dma_desc_addr; unsigned int i, j; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { ring = channel->rx_ring; if (!ring) break; dma_desc = ring->dma_desc_head; dma_desc_addr = ring->dma_desc_head_addr; for (j = 0; j < ring->dma_desc_count; j++) { desc_data = XLGMAC_GET_DESC_DATA(ring, j); desc_data->dma_desc = 
dma_desc; desc_data->dma_desc_addr = dma_desc_addr; if (xlgmac_map_rx_buffer(pdata, ring, desc_data)) break; dma_desc++; dma_desc_addr += sizeof(struct xlgmac_dma_desc); } ring->cur = 0; ring->dirty = 0; hw_ops->rx_desc_init(channel); } } static int xlgmac_map_tx_skb(struct xlgmac_channel *channel, struct sk_buff *skb) { struct xlgmac_pdata *pdata = channel->pdata; struct xlgmac_ring *ring = channel->tx_ring; unsigned int start_index, cur_index; struct xlgmac_desc_data *desc_data; unsigned int offset, datalen, len; struct xlgmac_pkt_info *pkt_info; skb_frag_t *frag; unsigned int tso, vlan; dma_addr_t skb_dma; unsigned int i; offset = 0; start_index = ring->cur; cur_index = ring->cur; pkt_info = &ring->pkt_info; pkt_info->desc_count = 0; pkt_info->length = 0; tso = XLGMAC_GET_REG_BITS(pkt_info->attributes, TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN); vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes, TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN); /* Save space for a context descriptor if needed */ if ((tso && (pkt_info->mss != ring->tx.cur_mss)) || (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag))) cur_index++; desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); if (tso) { /* Map the TSO header */ skb_dma = dma_map_single(pdata->dev, skb->data, pkt_info->header_len, DMA_TO_DEVICE); if (dma_mapping_error(pdata->dev, skb_dma)) { netdev_alert(pdata->netdev, "dma_map_single failed\n"); goto err_out; } desc_data->skb_dma = skb_dma; desc_data->skb_dma_len = pkt_info->header_len; netif_dbg(pdata, tx_queued, pdata->netdev, "skb header: index=%u, dma=%pad, len=%u\n", cur_index, &skb_dma, pkt_info->header_len); offset = pkt_info->header_len; pkt_info->length += pkt_info->header_len; cur_index++; desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); } /* Map the (remainder of the) packet */ for (datalen = skb_headlen(skb) - offset; datalen; ) { len = min_t(unsigned int, datalen, XLGMAC_TX_MAX_BUF_SIZE); skb_dma = dma_map_single(pdata->dev, skb->data + offset, len, DMA_TO_DEVICE); if (dma_mapping_error(pdata->dev, skb_dma)) { netdev_alert(pdata->netdev, "dma_map_single failed\n"); goto err_out; } desc_data->skb_dma = skb_dma; desc_data->skb_dma_len = len; netif_dbg(pdata, tx_queued, pdata->netdev, "skb data: index=%u, dma=%pad, len=%u\n", cur_index, &skb_dma, len); datalen -= len; offset += len; pkt_info->length += len; cur_index++; desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { netif_dbg(pdata, tx_queued, pdata->netdev, "mapping frag %u\n", i); frag = &skb_shinfo(skb)->frags[i]; offset = 0; for (datalen = skb_frag_size(frag); datalen; ) { len = min_t(unsigned int, datalen, XLGMAC_TX_MAX_BUF_SIZE); skb_dma = skb_frag_dma_map(pdata->dev, frag, offset, len, DMA_TO_DEVICE); if (dma_mapping_error(pdata->dev, skb_dma)) { netdev_alert(pdata->netdev, "skb_frag_dma_map failed\n"); goto err_out; } desc_data->skb_dma = skb_dma; desc_data->skb_dma_len = len; desc_data->mapped_as_page = 1; netif_dbg(pdata, tx_queued, pdata->netdev, "skb frag: index=%u, dma=%pad, len=%u\n", cur_index, &skb_dma, len); datalen -= len; offset += len; pkt_info->length += len; cur_index++; desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); } } /* Save the skb address in the last entry. We always have some data * that has been mapped so desc_data is always advanced past the last * piece of mapped data - use the entry pointed to by cur_index - 1. 
*/ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index - 1); desc_data->skb = skb; /* Save the number of descriptor entries used */ pkt_info->desc_count = cur_index - start_index; return pkt_info->desc_count; err_out: while (start_index < cur_index) { desc_data = XLGMAC_GET_DESC_DATA(ring, start_index++); xlgmac_unmap_desc_data(pdata, desc_data); } return 0; } void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops) { desc_ops->alloc_channels_and_rings = xlgmac_alloc_channels_and_rings; desc_ops->free_channels_and_rings = xlgmac_free_channels_and_rings; desc_ops->map_tx_skb = xlgmac_map_tx_skb; desc_ops->map_rx_buffer = xlgmac_map_rx_buffer; desc_ops->unmap_desc_data = xlgmac_unmap_desc_data; desc_ops->tx_desc_init = xlgmac_tx_desc_init; desc_ops->rx_desc_init = xlgmac_rx_desc_init; }
linux-master
drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
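Annotation (not part of the source above): xlgmac_set_buffer_data() carves one large page allocation into Rx buffer slices by advancing an offset; once no further slice of the same size would fit, the slice that takes the last cut records the whole allocation for later unmapping and the pool is retired so the next request allocates a fresh one. The userspace analogue below is only a sketch under those assumptions: malloc()/free() stand in for page allocation and DMA mapping, and the driver's per-slice page refcounting (get_page()/put_page()) is deliberately left out.

#include <stdio.h>
#include <stdlib.h>

struct demo_pool {
	unsigned char *base;	/* backing allocation; NULL once retired */
	size_t len;
	size_t offset;
};

struct demo_slice {
	unsigned char *data;
	size_t len;
	unsigned char *owner;	/* non-NULL: this slice frees the backing store */
};

static int demo_take_slice(struct demo_pool *pool, struct demo_slice *s,
			   size_t slice_len, size_t pool_len)
{
	if (!pool->base) {			/* (re)fill the pool */
		pool->base = malloc(pool_len);
		if (!pool->base)
			return -1;
		pool->len = pool_len;
		pool->offset = 0;
	}

	s->data = pool->base + pool->offset;
	s->len = slice_len;
	s->owner = NULL;
	pool->offset += slice_len;

	/* No room for another slice of this size: hand off ownership. */
	if (pool->offset + slice_len > pool->len) {
		s->owner = pool->base;
		pool->base = NULL;		/* force a fresh pool next time */
	}
	return 0;
}

int main(void)
{
	struct demo_pool pool = { 0 };
	struct demo_slice s[3];

	for (int i = 0; i < 3; i++) {
		if (demo_take_slice(&pool, &s[i], 1024, 2048))
			return 1;
		printf("slice %d owns backing store: %s\n",
		       i, s[i].owner ? "yes" : "no");
	}

	for (int i = 0; i < 3; i++)
		if (s[i].owner)
			free(s[i].owner);	/* mirrors the pa_unmap path */
	if (pool.base)
		free(pool.base);		/* still-live pool, freed at teardown */
	return 0;
}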
/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver * * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) * * This program is dual-licensed; you may select either version 2 of * the GNU General Public License ("GPL") or BSD license ("BSD"). * * This Synopsys DWC XLGMAC software driver and associated documentation * (hereinafter the "Software") is an unsupported proprietary work of * Synopsys, Inc. unless otherwise expressly agreed to in writing between * Synopsys and you. The Software IS NOT an item of Licensed Software or a * Licensed Product under any End User Software License Agreement or * Agreement for Licensed Products with Synopsys or any supplement thereto. * Synopsys is a registered trademark of Synopsys, Inc. Other names included * in the SOFTWARE may be the trademarks of their respective owners. */ #include <linux/ethtool.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include "dwc-xlgmac.h" #include "dwc-xlgmac-reg.h" struct xlgmac_stats_desc { char stat_string[ETH_GSTRING_LEN]; int stat_offset; }; #define XLGMAC_STAT(str, var) \ { \ str, \ offsetof(struct xlgmac_pdata, stats.var), \ } static const struct xlgmac_stats_desc xlgmac_gstring_stats[] = { /* MMC TX counters */ XLGMAC_STAT("tx_bytes", txoctetcount_gb), XLGMAC_STAT("tx_bytes_good", txoctetcount_g), XLGMAC_STAT("tx_packets", txframecount_gb), XLGMAC_STAT("tx_packets_good", txframecount_g), XLGMAC_STAT("tx_unicast_packets", txunicastframes_gb), XLGMAC_STAT("tx_broadcast_packets", txbroadcastframes_gb), XLGMAC_STAT("tx_broadcast_packets_good", txbroadcastframes_g), XLGMAC_STAT("tx_multicast_packets", txmulticastframes_gb), XLGMAC_STAT("tx_multicast_packets_good", txmulticastframes_g), XLGMAC_STAT("tx_vlan_packets_good", txvlanframes_g), XLGMAC_STAT("tx_64_byte_packets", tx64octets_gb), XLGMAC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb), XLGMAC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb), XLGMAC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb), XLGMAC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb), XLGMAC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb), XLGMAC_STAT("tx_underflow_errors", txunderflowerror), XLGMAC_STAT("tx_pause_frames", txpauseframes), /* MMC RX counters */ XLGMAC_STAT("rx_bytes", rxoctetcount_gb), XLGMAC_STAT("rx_bytes_good", rxoctetcount_g), XLGMAC_STAT("rx_packets", rxframecount_gb), XLGMAC_STAT("rx_unicast_packets_good", rxunicastframes_g), XLGMAC_STAT("rx_broadcast_packets_good", rxbroadcastframes_g), XLGMAC_STAT("rx_multicast_packets_good", rxmulticastframes_g), XLGMAC_STAT("rx_vlan_packets", rxvlanframes_gb), XLGMAC_STAT("rx_64_byte_packets", rx64octets_gb), XLGMAC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb), XLGMAC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb), XLGMAC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb), XLGMAC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb), XLGMAC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb), XLGMAC_STAT("rx_undersize_packets_good", rxundersize_g), XLGMAC_STAT("rx_oversize_packets_good", rxoversize_g), XLGMAC_STAT("rx_crc_errors", rxcrcerror), XLGMAC_STAT("rx_crc_errors_small_packets", rxrunterror), XLGMAC_STAT("rx_crc_errors_giant_packets", rxjabbererror), XLGMAC_STAT("rx_length_errors", rxlengtherror), XLGMAC_STAT("rx_out_of_range_errors", rxoutofrangetype), XLGMAC_STAT("rx_fifo_overflow_errors", rxfifooverflow), XLGMAC_STAT("rx_watchdog_errors", rxwatchdogerror), XLGMAC_STAT("rx_pause_frames", rxpauseframes), /* Extra counters */ 
XLGMAC_STAT("tx_tso_packets", tx_tso_packets), XLGMAC_STAT("rx_split_header_packets", rx_split_header_packets), XLGMAC_STAT("tx_process_stopped", tx_process_stopped), XLGMAC_STAT("rx_process_stopped", rx_process_stopped), XLGMAC_STAT("tx_buffer_unavailable", tx_buffer_unavailable), XLGMAC_STAT("rx_buffer_unavailable", rx_buffer_unavailable), XLGMAC_STAT("fatal_bus_error", fatal_bus_error), XLGMAC_STAT("tx_vlan_packets", tx_vlan_packets), XLGMAC_STAT("rx_vlan_packets", rx_vlan_packets), XLGMAC_STAT("napi_poll_isr", napi_poll_isr), XLGMAC_STAT("napi_poll_txtimer", napi_poll_txtimer), }; #define XLGMAC_STATS_COUNT ARRAY_SIZE(xlgmac_gstring_stats) static void xlgmac_ethtool_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct xlgmac_pdata *pdata = netdev_priv(netdev); u32 ver = pdata->hw_feat.version; u32 snpsver, devid, userver; strscpy(drvinfo->driver, pdata->drv_name, sizeof(drvinfo->driver)); strscpy(drvinfo->version, pdata->drv_ver, sizeof(drvinfo->version)); strscpy(drvinfo->bus_info, dev_name(pdata->dev), sizeof(drvinfo->bus_info)); /* S|SNPSVER: Synopsys-defined Version * D|DEVID: Indicates the Device family * U|USERVER: User-defined Version */ snpsver = XLGMAC_GET_REG_BITS(ver, MAC_VR_SNPSVER_POS, MAC_VR_SNPSVER_LEN); devid = XLGMAC_GET_REG_BITS(ver, MAC_VR_DEVID_POS, MAC_VR_DEVID_LEN); userver = XLGMAC_GET_REG_BITS(ver, MAC_VR_USERVER_POS, MAC_VR_USERVER_LEN); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "S.D.U: %x.%x.%x", snpsver, devid, userver); } static u32 xlgmac_ethtool_get_msglevel(struct net_device *netdev) { struct xlgmac_pdata *pdata = netdev_priv(netdev); return pdata->msg_enable; } static void xlgmac_ethtool_set_msglevel(struct net_device *netdev, u32 msglevel) { struct xlgmac_pdata *pdata = netdev_priv(netdev); pdata->msg_enable = msglevel; } static void xlgmac_ethtool_get_channels(struct net_device *netdev, struct ethtool_channels *channel) { struct xlgmac_pdata *pdata = netdev_priv(netdev); channel->max_rx = XLGMAC_MAX_DMA_CHANNELS; channel->max_tx = XLGMAC_MAX_DMA_CHANNELS; channel->rx_count = pdata->rx_q_count; channel->tx_count = pdata->tx_q_count; } static int xlgmac_ethtool_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct xlgmac_pdata *pdata = netdev_priv(netdev); ec->rx_coalesce_usecs = pdata->rx_usecs; ec->rx_max_coalesced_frames = pdata->rx_frames; ec->tx_max_coalesced_frames = pdata->tx_frames; return 0; } static int xlgmac_ethtool_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct xlgmac_pdata *pdata = netdev_priv(netdev); struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; unsigned int rx_frames, rx_riwt, rx_usecs; unsigned int tx_frames; rx_usecs = ec->rx_coalesce_usecs; rx_riwt = hw_ops->usec_to_riwt(pdata, rx_usecs); rx_frames = ec->rx_max_coalesced_frames; tx_frames = ec->tx_max_coalesced_frames; if ((rx_riwt > XLGMAC_MAX_DMA_RIWT) || (rx_riwt < XLGMAC_MIN_DMA_RIWT) || (rx_frames > pdata->rx_desc_count)) return -EINVAL; if (tx_frames > pdata->tx_desc_count) return -EINVAL; pdata->rx_riwt = rx_riwt; pdata->rx_usecs = rx_usecs; pdata->rx_frames = rx_frames; hw_ops->config_rx_coalesce(pdata); pdata->tx_frames = tx_frames; hw_ops->config_tx_coalesce(pdata); return 0; } static void xlgmac_ethtool_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { int i; switch (stringset) { case ETH_SS_STATS: for (i = 
0; i < XLGMAC_STATS_COUNT; i++) { memcpy(data, xlgmac_gstring_stats[i].stat_string, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } break; default: WARN_ON(1); break; } } static int xlgmac_ethtool_get_sset_count(struct net_device *netdev, int stringset) { int ret; switch (stringset) { case ETH_SS_STATS: ret = XLGMAC_STATS_COUNT; break; default: ret = -EOPNOTSUPP; } return ret; } static void xlgmac_ethtool_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct xlgmac_pdata *pdata = netdev_priv(netdev); u8 *stat; int i; pdata->hw_ops.read_mmc_stats(pdata); for (i = 0; i < XLGMAC_STATS_COUNT; i++) { stat = (u8 *)pdata + xlgmac_gstring_stats[i].stat_offset; *data++ = *(u64 *)stat; } } static const struct ethtool_ops xlgmac_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_MAX_FRAMES, .get_drvinfo = xlgmac_ethtool_get_drvinfo, .get_link = ethtool_op_get_link, .get_msglevel = xlgmac_ethtool_get_msglevel, .set_msglevel = xlgmac_ethtool_set_msglevel, .get_channels = xlgmac_ethtool_get_channels, .get_coalesce = xlgmac_ethtool_get_coalesce, .set_coalesce = xlgmac_ethtool_set_coalesce, .get_strings = xlgmac_ethtool_get_strings, .get_sset_count = xlgmac_ethtool_get_sset_count, .get_ethtool_stats = xlgmac_ethtool_get_ethtool_stats, }; const struct ethtool_ops *xlgmac_get_ethtool_ops(void) { return &xlgmac_ethtool_ops; }
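The ethtool file above reports driver statistics through a table of { name, offsetof(struct xlgmac_pdata, stats.field) } descriptors, so xlgmac_ethtool_get_ethtool_stats can copy every u64 counter with one generic loop. Below is a minimal user-space sketch of the same pattern; the struct and counter names are illustrative, not taken from the driver.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative counter block; the driver keeps its counters in pdata->stats. */
struct demo_stats {
	uint64_t tx_packets;
	uint64_t rx_packets;
	uint64_t rx_crc_errors;
};

struct demo_stats_desc {
	const char *name;
	size_t offset;		/* byte offset of the counter inside demo_stats */
};

#define DEMO_STAT(str, field)	{ str, offsetof(struct demo_stats, field) }

static const struct demo_stats_desc demo_gstrings[] = {
	DEMO_STAT("tx_packets", tx_packets),
	DEMO_STAT("rx_packets", rx_packets),
	DEMO_STAT("rx_crc_errors", rx_crc_errors),
};

int main(void)
{
	struct demo_stats stats = { 12, 34, 1 };
	size_t i;

	/* Same walk as xlgmac_ethtool_get_ethtool_stats: base pointer + offset. */
	for (i = 0; i < sizeof(demo_gstrings) / sizeof(demo_gstrings[0]); i++) {
		const uint64_t *val = (const uint64_t *)
			((const uint8_t *)&stats + demo_gstrings[i].offset);

		printf("%-16s %llu\n", demo_gstrings[i].name,
		       (unsigned long long)*val);
	}
	return 0;
}

Adding a counter then only touches the stats struct and the descriptor table; get_strings and get_ethtool_stats stay in sync because both walk the same table.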
linux-master
drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
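xlgmac_ethtool_set_coalesce in the record above converts the requested rx-usecs into a receive interrupt watchdog timer (RIWT) value via hw_ops->usec_to_riwt before range-checking it against XLGMAC_MIN_DMA_RIWT/XLGMAC_MAX_DMA_RIWT. The conversion itself appears later in this section (xlgmac_usec_to_riwt in dwc-xlgmac-hw.c): one watchdog unit is 256 system-clock cycles. Below is a stand-alone sketch of that arithmetic, using an assumed 500 MHz clock rather than the driver's XLGMAC_SYSCLOCK.

#include <stdio.h>

/* One watchdog unit = 256 cycles of the system clock, per the comments in
 * xlgmac_usec_to_riwt()/xlgmac_riwt_to_usec() later in this section.
 */
static unsigned int usec_to_riwt(unsigned long sysclk_rate, unsigned int usec)
{
	return (usec * (sysclk_rate / 1000000)) / 256;
}

static unsigned int riwt_to_usec(unsigned long sysclk_rate, unsigned int riwt)
{
	return (riwt * 256) / (sysclk_rate / 1000000);
}

int main(void)
{
	unsigned long clk = 500000000UL;	/* assumed 500 MHz clock */

	/* 30 us at 500 MHz -> (30 * 500) / 256 = 58 watchdog units */
	printf("riwt = %u\n", usec_to_riwt(clk, 30));
	printf("usec = %u\n", riwt_to_usec(clk, usec_to_riwt(clk, 30)));
	return 0;
}

The round trip loses precision to integer division, which is presumably why the driver stores both rx_usecs and rx_riwt instead of recomputing one from the other.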
/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver * * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) * * This program is dual-licensed; you may select either version 2 of * the GNU General Public License ("GPL") or BSD license ("BSD"). * * This Synopsys DWC XLGMAC software driver and associated documentation * (hereinafter the "Software") is an unsupported proprietary work of * Synopsys, Inc. unless otherwise expressly agreed to in writing between * Synopsys and you. The Software IS NOT an item of Licensed Software or a * Licensed Product under any End User Software License Agreement or * Agreement for Licensed Products with Synopsys or any supplement thereto. * Synopsys is a registered trademark of Synopsys, Inc. Other names included * in the SOFTWARE may be the trademarks of their respective owners. */ #include <linux/kernel.h> #include <linux/module.h> #include "dwc-xlgmac.h" #include "dwc-xlgmac-reg.h" MODULE_LICENSE("Dual BSD/GPL"); static int debug = -1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "DWC ethernet debug level (0=none,...,16=all)"); static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP); static unsigned char dev_addr[6] = {0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7}; static void xlgmac_read_mac_addr(struct xlgmac_pdata *pdata) { struct net_device *netdev = pdata->netdev; /* Currently it uses a static mac address for test */ memcpy(pdata->mac_addr, dev_addr, netdev->addr_len); } static void xlgmac_default_config(struct xlgmac_pdata *pdata) { pdata->tx_osp_mode = DMA_OSP_ENABLE; pdata->tx_sf_mode = MTL_TSF_ENABLE; pdata->rx_sf_mode = MTL_RSF_DISABLE; pdata->pblx8 = DMA_PBL_X8_ENABLE; pdata->tx_pbl = DMA_PBL_32; pdata->rx_pbl = DMA_PBL_32; pdata->tx_threshold = MTL_TX_THRESHOLD_128; pdata->rx_threshold = MTL_RX_THRESHOLD_128; pdata->tx_pause = 1; pdata->rx_pause = 1; pdata->phy_speed = SPEED_25000; pdata->sysclk_rate = XLGMAC_SYSCLOCK; strscpy(pdata->drv_name, XLGMAC_DRV_NAME, sizeof(pdata->drv_name)); strscpy(pdata->drv_ver, XLGMAC_DRV_VERSION, sizeof(pdata->drv_ver)); } static void xlgmac_init_all_ops(struct xlgmac_pdata *pdata) { xlgmac_init_desc_ops(&pdata->desc_ops); xlgmac_init_hw_ops(&pdata->hw_ops); } static int xlgmac_init(struct xlgmac_pdata *pdata) { struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; struct net_device *netdev = pdata->netdev; unsigned int i; int ret; /* Set default configuration data */ xlgmac_default_config(pdata); /* Set irq, base_addr, MAC address, */ netdev->irq = pdata->dev_irq; netdev->base_addr = (unsigned long)pdata->mac_regs; xlgmac_read_mac_addr(pdata); eth_hw_addr_set(netdev, pdata->mac_addr); /* Set all the function pointers */ xlgmac_init_all_ops(pdata); /* Issue software reset to device */ hw_ops->exit(pdata); /* Populate the hardware features */ xlgmac_get_all_hw_features(pdata); xlgmac_print_all_hw_features(pdata); /* TODO: Set the PHY mode to XLGMII */ /* Set the DMA mask */ ret = dma_set_mask_and_coherent(pdata->dev, DMA_BIT_MASK(pdata->hw_feat.dma_width)); if (ret) { dev_err(pdata->dev, "dma_set_mask_and_coherent failed\n"); return ret; } /* Channel and ring params initializtion * pdata->channel_count; * pdata->tx_ring_count; * pdata->rx_ring_count; * pdata->tx_desc_count; * pdata->rx_desc_count; */ BUILD_BUG_ON_NOT_POWER_OF_2(XLGMAC_TX_DESC_CNT); pdata->tx_desc_count = XLGMAC_TX_DESC_CNT; if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) { dev_err(pdata->dev, "tx descriptor count (%d) is not valid\n", pdata->tx_desc_count); ret = -EINVAL; return ret; } 
BUILD_BUG_ON_NOT_POWER_OF_2(XLGMAC_RX_DESC_CNT); pdata->rx_desc_count = XLGMAC_RX_DESC_CNT; if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) { dev_err(pdata->dev, "rx descriptor count (%d) is not valid\n", pdata->rx_desc_count); ret = -EINVAL; return ret; } pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(), pdata->hw_feat.tx_ch_cnt); pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count, pdata->hw_feat.tx_q_cnt); pdata->tx_q_count = pdata->tx_ring_count; ret = netif_set_real_num_tx_queues(netdev, pdata->tx_q_count); if (ret) { dev_err(pdata->dev, "error setting real tx queue count\n"); return ret; } pdata->rx_ring_count = min_t(unsigned int, netif_get_num_default_rss_queues(), pdata->hw_feat.rx_ch_cnt); pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count, pdata->hw_feat.rx_q_cnt); pdata->rx_q_count = pdata->rx_ring_count; ret = netif_set_real_num_rx_queues(netdev, pdata->rx_q_count); if (ret) { dev_err(pdata->dev, "error setting real rx queue count\n"); return ret; } pdata->channel_count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); /* Initialize RSS hash key and lookup table */ netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key)); for (i = 0; i < XLGMAC_RSS_MAX_TABLE_SIZE; i++) pdata->rss_table[i] = XLGMAC_SET_REG_BITS( pdata->rss_table[i], MAC_RSSDR_DMCH_POS, MAC_RSSDR_DMCH_LEN, i % pdata->rx_ring_count); pdata->rss_options = XLGMAC_SET_REG_BITS( pdata->rss_options, MAC_RSSCR_IP2TE_POS, MAC_RSSCR_IP2TE_LEN, 1); pdata->rss_options = XLGMAC_SET_REG_BITS( pdata->rss_options, MAC_RSSCR_TCP4TE_POS, MAC_RSSCR_TCP4TE_LEN, 1); pdata->rss_options = XLGMAC_SET_REG_BITS( pdata->rss_options, MAC_RSSCR_UDP4TE_POS, MAC_RSSCR_UDP4TE_LEN, 1); /* Set device operations */ netdev->netdev_ops = xlgmac_get_netdev_ops(); netdev->ethtool_ops = xlgmac_get_ethtool_ops(); /* Set device features */ if (pdata->hw_feat.tso) { netdev->hw_features = NETIF_F_TSO; netdev->hw_features |= NETIF_F_TSO6; netdev->hw_features |= NETIF_F_SG; netdev->hw_features |= NETIF_F_IP_CSUM; netdev->hw_features |= NETIF_F_IPV6_CSUM; } else if (pdata->hw_feat.tx_coe) { netdev->hw_features = NETIF_F_IP_CSUM; netdev->hw_features |= NETIF_F_IPV6_CSUM; } if (pdata->hw_feat.rx_coe) { netdev->hw_features |= NETIF_F_RXCSUM; netdev->hw_features |= NETIF_F_GRO; } if (pdata->hw_feat.rss) netdev->hw_features |= NETIF_F_RXHASH; netdev->vlan_features |= netdev->hw_features; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; if (pdata->hw_feat.sa_vlan_ins) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; if (pdata->hw_feat.vlhash) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->features |= netdev->hw_features; pdata->netdev_features = netdev->features; netdev->priv_flags |= IFF_UNICAST_FLT; /* Use default watchdog timeout */ netdev->watchdog_timeo = 0; /* Tx coalesce parameters initialization */ pdata->tx_usecs = XLGMAC_INIT_DMA_TX_USECS; pdata->tx_frames = XLGMAC_INIT_DMA_TX_FRAMES; /* Rx coalesce parameters initialization */ pdata->rx_riwt = hw_ops->usec_to_riwt(pdata, XLGMAC_INIT_DMA_RX_USECS); pdata->rx_usecs = XLGMAC_INIT_DMA_RX_USECS; pdata->rx_frames = XLGMAC_INIT_DMA_RX_FRAMES; return 0; } int xlgmac_drv_probe(struct device *dev, struct xlgmac_resources *res) { struct xlgmac_pdata *pdata; struct net_device *netdev; int ret; netdev = alloc_etherdev_mq(sizeof(struct xlgmac_pdata), XLGMAC_MAX_DMA_CHANNELS); if (!netdev) { dev_err(dev, "alloc_etherdev failed\n"); return -ENOMEM; } SET_NETDEV_DEV(netdev, dev); dev_set_drvdata(dev, netdev); pdata = netdev_priv(netdev); 
pdata->dev = dev; pdata->netdev = netdev; pdata->dev_irq = res->irq; pdata->mac_regs = res->addr; mutex_init(&pdata->rss_mutex); pdata->msg_enable = netif_msg_init(debug, default_msg_level); ret = xlgmac_init(pdata); if (ret) { dev_err(dev, "xlgmac init failed\n"); goto err_free_netdev; } ret = register_netdev(netdev); if (ret) { dev_err(dev, "net device registration failed\n"); goto err_free_netdev; } return 0; err_free_netdev: free_netdev(netdev); return ret; } int xlgmac_drv_remove(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); unregister_netdev(netdev); free_netdev(netdev); return 0; } void xlgmac_dump_tx_desc(struct xlgmac_pdata *pdata, struct xlgmac_ring *ring, unsigned int idx, unsigned int count, unsigned int flag) { struct xlgmac_desc_data *desc_data; struct xlgmac_dma_desc *dma_desc; while (count--) { desc_data = XLGMAC_GET_DESC_DATA(ring, idx); dma_desc = desc_data->dma_desc; netdev_dbg(pdata->netdev, "TX: dma_desc=%p, dma_desc_addr=%pad\n", desc_data->dma_desc, &desc_data->dma_desc_addr); netdev_dbg(pdata->netdev, "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx, (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE", le32_to_cpu(dma_desc->desc0), le32_to_cpu(dma_desc->desc1), le32_to_cpu(dma_desc->desc2), le32_to_cpu(dma_desc->desc3)); idx++; } } void xlgmac_dump_rx_desc(struct xlgmac_pdata *pdata, struct xlgmac_ring *ring, unsigned int idx) { struct xlgmac_desc_data *desc_data; struct xlgmac_dma_desc *dma_desc; desc_data = XLGMAC_GET_DESC_DATA(ring, idx); dma_desc = desc_data->dma_desc; netdev_dbg(pdata->netdev, "RX: dma_desc=%p, dma_desc_addr=%pad\n", desc_data->dma_desc, &desc_data->dma_desc_addr); netdev_dbg(pdata->netdev, "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx, le32_to_cpu(dma_desc->desc0), le32_to_cpu(dma_desc->desc1), le32_to_cpu(dma_desc->desc2), le32_to_cpu(dma_desc->desc3)); } void xlgmac_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) { struct ethhdr *eth = (struct ethhdr *)skb->data; unsigned char buffer[128]; unsigned int i; netdev_dbg(netdev, "\n************** SKB dump ****************\n"); netdev_dbg(netdev, "%s packet of %d bytes\n", (tx_rx ? 
"TX" : "RX"), skb->len); netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest); netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source); netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto)); for (i = 0; i < skb->len; i += 32) { unsigned int len = min(skb->len - i, 32U); hex_dump_to_buffer(&skb->data[i], len, 32, 1, buffer, sizeof(buffer), false); netdev_dbg(netdev, " %#06x: %s\n", i, buffer); } netdev_dbg(netdev, "\n************** SKB dump ****************\n"); } void xlgmac_get_all_hw_features(struct xlgmac_pdata *pdata) { struct xlgmac_hw_features *hw_feat = &pdata->hw_feat; unsigned int mac_hfr0, mac_hfr1, mac_hfr2; mac_hfr0 = readl(pdata->mac_regs + MAC_HWF0R); mac_hfr1 = readl(pdata->mac_regs + MAC_HWF1R); mac_hfr2 = readl(pdata->mac_regs + MAC_HWF2R); memset(hw_feat, 0, sizeof(*hw_feat)); hw_feat->version = readl(pdata->mac_regs + MAC_VR); /* Hardware feature register 0 */ hw_feat->phyifsel = XLGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_PHYIFSEL_POS, MAC_HWF0R_PHYIFSEL_LEN); hw_feat->vlhash = XLGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_VLHASH_POS, MAC_HWF0R_VLHASH_LEN); hw_feat->sma = XLGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_SMASEL_POS, MAC_HWF0R_SMASEL_LEN); hw_feat->rwk = XLGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_RWKSEL_POS, MAC_HWF0R_RWKSEL_LEN); hw_feat->mgk = XLGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_MGKSEL_POS, MAC_HWF0R_MGKSEL_LEN); hw_feat->mmc = XLGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_MMCSEL_POS, MAC_HWF0R_MMCSEL_LEN); hw_feat->aoe = XLGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_ARPOFFSEL_POS, MAC_HWF0R_ARPOFFSEL_LEN); hw_feat->ts = XLGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_TSSEL_POS, MAC_HWF0R_TSSEL_LEN); hw_feat->eee = XLGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_EEESEL_POS, MAC_HWF0R_EEESEL_LEN); hw_feat->tx_coe = XLGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_TXCOESEL_POS, MAC_HWF0R_TXCOESEL_LEN); hw_feat->rx_coe = XLGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_RXCOESEL_POS, MAC_HWF0R_RXCOESEL_LEN); hw_feat->addn_mac = XLGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_ADDMACADRSEL_POS, MAC_HWF0R_ADDMACADRSEL_LEN); hw_feat->ts_src = XLGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_TSSTSSEL_POS, MAC_HWF0R_TSSTSSEL_LEN); hw_feat->sa_vlan_ins = XLGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_SAVLANINS_POS, MAC_HWF0R_SAVLANINS_LEN); /* Hardware feature register 1 */ hw_feat->rx_fifo_size = XLGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_RXFIFOSIZE_POS, MAC_HWF1R_RXFIFOSIZE_LEN); hw_feat->tx_fifo_size = XLGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_TXFIFOSIZE_POS, MAC_HWF1R_TXFIFOSIZE_LEN); hw_feat->adv_ts_hi = XLGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_ADVTHWORD_POS, MAC_HWF1R_ADVTHWORD_LEN); hw_feat->dma_width = XLGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_ADDR64_POS, MAC_HWF1R_ADDR64_LEN); hw_feat->dcb = XLGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_DCBEN_POS, MAC_HWF1R_DCBEN_LEN); hw_feat->sph = XLGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_SPHEN_POS, MAC_HWF1R_SPHEN_LEN); hw_feat->tso = XLGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_TSOEN_POS, MAC_HWF1R_TSOEN_LEN); hw_feat->dma_debug = XLGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_DBGMEMA_POS, MAC_HWF1R_DBGMEMA_LEN); hw_feat->rss = XLGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_RSSEN_POS, MAC_HWF1R_RSSEN_LEN); hw_feat->tc_cnt = XLGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_NUMTC_POS, MAC_HWF1R_NUMTC_LEN); hw_feat->hash_table_size = XLGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_HASHTBLSZ_POS, MAC_HWF1R_HASHTBLSZ_LEN); hw_feat->l3l4_filter_num = XLGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_L3L4FNUM_POS, MAC_HWF1R_L3L4FNUM_LEN); /* Hardware feature register 2 */ hw_feat->rx_q_cnt = XLGMAC_GET_REG_BITS(mac_hfr2, MAC_HWF2R_RXQCNT_POS, 
MAC_HWF2R_RXQCNT_LEN); hw_feat->tx_q_cnt = XLGMAC_GET_REG_BITS(mac_hfr2, MAC_HWF2R_TXQCNT_POS, MAC_HWF2R_TXQCNT_LEN); hw_feat->rx_ch_cnt = XLGMAC_GET_REG_BITS(mac_hfr2, MAC_HWF2R_RXCHCNT_POS, MAC_HWF2R_RXCHCNT_LEN); hw_feat->tx_ch_cnt = XLGMAC_GET_REG_BITS(mac_hfr2, MAC_HWF2R_TXCHCNT_POS, MAC_HWF2R_TXCHCNT_LEN); hw_feat->pps_out_num = XLGMAC_GET_REG_BITS(mac_hfr2, MAC_HWF2R_PPSOUTNUM_POS, MAC_HWF2R_PPSOUTNUM_LEN); hw_feat->aux_snap_num = XLGMAC_GET_REG_BITS(mac_hfr2, MAC_HWF2R_AUXSNAPNUM_POS, MAC_HWF2R_AUXSNAPNUM_LEN); /* Translate the Hash Table size into actual number */ switch (hw_feat->hash_table_size) { case 0: break; case 1: hw_feat->hash_table_size = 64; break; case 2: hw_feat->hash_table_size = 128; break; case 3: hw_feat->hash_table_size = 256; break; } /* Translate the address width setting into actual number */ switch (hw_feat->dma_width) { case 0: hw_feat->dma_width = 32; break; case 1: hw_feat->dma_width = 40; break; case 2: hw_feat->dma_width = 48; break; default: hw_feat->dma_width = 32; } /* The Queue, Channel and TC counts are zero based so increment them * to get the actual number */ hw_feat->rx_q_cnt++; hw_feat->tx_q_cnt++; hw_feat->rx_ch_cnt++; hw_feat->tx_ch_cnt++; hw_feat->tc_cnt++; } void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata) { char __maybe_unused *str = NULL; XLGMAC_PR("\n"); XLGMAC_PR("=====================================================\n"); XLGMAC_PR("\n"); XLGMAC_PR("HW support following features\n"); XLGMAC_PR("\n"); /* HW Feature Register0 */ XLGMAC_PR("VLAN Hash Filter Selected : %s\n", pdata->hw_feat.vlhash ? "YES" : "NO"); XLGMAC_PR("SMA (MDIO) Interface : %s\n", pdata->hw_feat.sma ? "YES" : "NO"); XLGMAC_PR("PMT Remote Wake-up Packet Enable : %s\n", pdata->hw_feat.rwk ? "YES" : "NO"); XLGMAC_PR("PMT Magic Packet Enable : %s\n", pdata->hw_feat.mgk ? "YES" : "NO"); XLGMAC_PR("RMON/MMC Module Enable : %s\n", pdata->hw_feat.mmc ? "YES" : "NO"); XLGMAC_PR("ARP Offload Enabled : %s\n", pdata->hw_feat.aoe ? "YES" : "NO"); XLGMAC_PR("IEEE 1588-2008 Timestamp Enabled : %s\n", pdata->hw_feat.ts ? "YES" : "NO"); XLGMAC_PR("Energy Efficient Ethernet Enabled : %s\n", pdata->hw_feat.eee ? "YES" : "NO"); XLGMAC_PR("Transmit Checksum Offload Enabled : %s\n", pdata->hw_feat.tx_coe ? "YES" : "NO"); XLGMAC_PR("Receive Checksum Offload Enabled : %s\n", pdata->hw_feat.rx_coe ? "YES" : "NO"); XLGMAC_PR("Additional MAC Addresses 1-31 Selected : %s\n", pdata->hw_feat.addn_mac ? "YES" : "NO"); switch (pdata->hw_feat.ts_src) { case 0: str = "RESERVED"; break; case 1: str = "INTERNAL"; break; case 2: str = "EXTERNAL"; break; case 3: str = "BOTH"; break; } XLGMAC_PR("Timestamp System Time Source : %s\n", str); XLGMAC_PR("Source Address or VLAN Insertion Enable : %s\n", pdata->hw_feat.sa_vlan_ins ? 
"YES" : "NO"); /* HW Feature Register1 */ switch (pdata->hw_feat.rx_fifo_size) { case 0: str = "128 bytes"; break; case 1: str = "256 bytes"; break; case 2: str = "512 bytes"; break; case 3: str = "1 KBytes"; break; case 4: str = "2 KBytes"; break; case 5: str = "4 KBytes"; break; case 6: str = "8 KBytes"; break; case 7: str = "16 KBytes"; break; case 8: str = "32 kBytes"; break; case 9: str = "64 KBytes"; break; case 10: str = "128 KBytes"; break; case 11: str = "256 KBytes"; break; default: str = "RESERVED"; } XLGMAC_PR("MTL Receive FIFO Size : %s\n", str); switch (pdata->hw_feat.tx_fifo_size) { case 0: str = "128 bytes"; break; case 1: str = "256 bytes"; break; case 2: str = "512 bytes"; break; case 3: str = "1 KBytes"; break; case 4: str = "2 KBytes"; break; case 5: str = "4 KBytes"; break; case 6: str = "8 KBytes"; break; case 7: str = "16 KBytes"; break; case 8: str = "32 kBytes"; break; case 9: str = "64 KBytes"; break; case 10: str = "128 KBytes"; break; case 11: str = "256 KBytes"; break; default: str = "RESERVED"; } XLGMAC_PR("MTL Transmit FIFO Size : %s\n", str); XLGMAC_PR("IEEE 1588 High Word Register Enable : %s\n", pdata->hw_feat.adv_ts_hi ? "YES" : "NO"); XLGMAC_PR("Address width : %u\n", pdata->hw_feat.dma_width); XLGMAC_PR("DCB Feature Enable : %s\n", pdata->hw_feat.dcb ? "YES" : "NO"); XLGMAC_PR("Split Header Feature Enable : %s\n", pdata->hw_feat.sph ? "YES" : "NO"); XLGMAC_PR("TCP Segmentation Offload Enable : %s\n", pdata->hw_feat.tso ? "YES" : "NO"); XLGMAC_PR("DMA Debug Registers Enabled : %s\n", pdata->hw_feat.dma_debug ? "YES" : "NO"); XLGMAC_PR("RSS Feature Enabled : %s\n", pdata->hw_feat.rss ? "YES" : "NO"); XLGMAC_PR("Number of Traffic classes : %u\n", (pdata->hw_feat.tc_cnt)); XLGMAC_PR("Hash Table Size : %u\n", pdata->hw_feat.hash_table_size); XLGMAC_PR("Total number of L3 or L4 Filters : %u\n", pdata->hw_feat.l3l4_filter_num); /* HW Feature Register2 */ XLGMAC_PR("Number of MTL Receive Queues : %u\n", pdata->hw_feat.rx_q_cnt); XLGMAC_PR("Number of MTL Transmit Queues : %u\n", pdata->hw_feat.tx_q_cnt); XLGMAC_PR("Number of DMA Receive Channels : %u\n", pdata->hw_feat.rx_ch_cnt); XLGMAC_PR("Number of DMA Transmit Channels : %u\n", pdata->hw_feat.tx_ch_cnt); switch (pdata->hw_feat.pps_out_num) { case 0: str = "No PPS output"; break; case 1: str = "1 PPS output"; break; case 2: str = "2 PPS output"; break; case 3: str = "3 PPS output"; break; case 4: str = "4 PPS output"; break; default: str = "RESERVED"; } XLGMAC_PR("Number of PPS Outputs : %s\n", str); switch (pdata->hw_feat.aux_snap_num) { case 0: str = "No auxiliary input"; break; case 1: str = "1 auxiliary input"; break; case 2: str = "2 auxiliary input"; break; case 3: str = "3 auxiliary input"; break; case 4: str = "4 auxiliary input"; break; default: str = "RESERVED"; } XLGMAC_PR("Number of Auxiliary Snapshot Inputs : %s", str); XLGMAC_PR("\n"); XLGMAC_PR("=====================================================\n"); XLGMAC_PR("\n"); }
linux-master
drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
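The dwc-xlgmac-hw.c record that follows derives both the VLAN hash filter and the MAC address hash filter from a bit-reversed CRC-32: xlgmac_update_vlan_hash_table keeps bitrev32(~crc) >> 28 as a 4-bit index into a 16-bit table, and xlgmac_set_mac_hash_table applies crc32_le to the 6-byte address with a shift derived from the hash table size. Below is a small user-space sketch of the VLAN variant; it reimplements bitrev32, uses the standard reflected CRC-32 polynomial 0xEDB88320 (the value behind the kernel's CRC32_POLY_LE), and assumes a little-endian host so the in-memory byte order matches the driver's cpu_to_le16().

#include <stdint.h>
#include <stdio.h>

#define DEMO_CRC32_POLY_LE	0xedb88320U	/* standard reflected CRC-32 polynomial */

/* CRC-32 over the low 12 bits of the VLAN ID, bit by bit, mirroring
 * xlgmac_vid_crc32_le() in the record below (little-endian host assumed).
 */
static uint32_t vid_crc32_le(uint16_t vid)
{
	const uint8_t *data = (const uint8_t *)&vid;
	uint32_t crc = ~0U;
	uint8_t byte = 0;
	int i;

	for (i = 0; i < 12; i++) {
		if ((i % 8) == 0)
			byte = data[i / 8];
		if ((crc ^ byte) & 1)
			crc = (crc >> 1) ^ DEMO_CRC32_POLY_LE;
		else
			crc >>= 1;
		byte >>= 1;
	}
	return crc;
}

static uint32_t bitrev32(uint32_t x)
{
	uint32_t r = 0;
	int i;

	for (i = 0; i < 32; i++)
		r |= ((x >> i) & 1U) << (31 - i);
	return r;
}

int main(void)
{
	uint16_t vid = 100;	/* example VLAN ID */
	uint32_t bit = bitrev32(~vid_crc32_le(vid)) >> 28;

	/* The driver then sets (1 << bit) in the 16-bit MAC_VLANHTR value. */
	printf("VID %u hashes to bit %u\n", vid, (unsigned int)bit);
	return 0;
}

Only four bits of the CRC survive, so distinct VLAN IDs can collide on the same bit; the hash filter is a coarse pre-filter rather than an exact match.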
/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver * * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) * * This program is dual-licensed; you may select either version 2 of * the GNU General Public License ("GPL") or BSD license ("BSD"). * * This Synopsys DWC XLGMAC software driver and associated documentation * (hereinafter the "Software") is an unsupported proprietary work of * Synopsys, Inc. unless otherwise expressly agreed to in writing between * Synopsys and you. The Software IS NOT an item of Licensed Software or a * Licensed Product under any End User Software License Agreement or * Agreement for Licensed Products with Synopsys or any supplement thereto. * Synopsys is a registered trademark of Synopsys, Inc. Other names included * in the SOFTWARE may be the trademarks of their respective owners. */ #include <linux/phy.h> #include <linux/mdio.h> #include <linux/clk.h> #include <linux/bitrev.h> #include <linux/crc32.h> #include <linux/crc32poly.h> #include <linux/dcbnl.h> #include "dwc-xlgmac.h" #include "dwc-xlgmac-reg.h" static int xlgmac_tx_complete(struct xlgmac_dma_desc *dma_desc) { return !XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, TX_NORMAL_DESC3_OWN_LEN); } static int xlgmac_disable_rx_csum(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MAC_RCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS, MAC_RCR_IPC_LEN, 0); writel(regval, pdata->mac_regs + MAC_RCR); return 0; } static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MAC_RCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS, MAC_RCR_IPC_LEN, 1); writel(regval, pdata->mac_regs + MAC_RCR); return 0; } static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, const u8 *addr) { unsigned int mac_addr_hi, mac_addr_lo; mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | (addr[0] << 0); writel(mac_addr_hi, pdata->mac_regs + MAC_MACA0HR); writel(mac_addr_lo, pdata->mac_regs + MAC_MACA0LR); return 0; } static void xlgmac_set_mac_reg(struct xlgmac_pdata *pdata, struct netdev_hw_addr *ha, unsigned int *mac_reg) { unsigned int mac_addr_hi, mac_addr_lo; u8 *mac_addr; mac_addr_lo = 0; mac_addr_hi = 0; if (ha) { mac_addr = (u8 *)&mac_addr_lo; mac_addr[0] = ha->addr[0]; mac_addr[1] = ha->addr[1]; mac_addr[2] = ha->addr[2]; mac_addr[3] = ha->addr[3]; mac_addr = (u8 *)&mac_addr_hi; mac_addr[0] = ha->addr[4]; mac_addr[1] = ha->addr[5]; netif_dbg(pdata, drv, pdata->netdev, "adding mac address %pM at %#x\n", ha->addr, *mac_reg); mac_addr_hi = XLGMAC_SET_REG_BITS(mac_addr_hi, MAC_MACA1HR_AE_POS, MAC_MACA1HR_AE_LEN, 1); } writel(mac_addr_hi, pdata->mac_regs + *mac_reg); *mac_reg += MAC_MACA_INC; writel(mac_addr_lo, pdata->mac_regs + *mac_reg); *mac_reg += MAC_MACA_INC; } static int xlgmac_enable_rx_vlan_stripping(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MAC_VLANTR); /* Put the VLAN tag in the Rx descriptor */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLRXS_POS, MAC_VLANTR_EVLRXS_LEN, 1); /* Don't check the VLAN type */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_DOVLTC_POS, MAC_VLANTR_DOVLTC_LEN, 1); /* Check only C-TAG (0x8100) packets */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ERSVLM_POS, MAC_VLANTR_ERSVLM_LEN, 0); /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ESVL_POS, MAC_VLANTR_ESVL_LEN, 0); /* Enable VLAN tag stripping */ regval 
= XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS, MAC_VLANTR_EVLS_LEN, 0x3); writel(regval, pdata->mac_regs + MAC_VLANTR); return 0; } static int xlgmac_disable_rx_vlan_stripping(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MAC_VLANTR); regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS, MAC_VLANTR_EVLS_LEN, 0); writel(regval, pdata->mac_regs + MAC_VLANTR); return 0; } static int xlgmac_enable_rx_vlan_filtering(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MAC_PFR); /* Enable VLAN filtering */ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS, MAC_PFR_VTFE_LEN, 1); writel(regval, pdata->mac_regs + MAC_PFR); regval = readl(pdata->mac_regs + MAC_VLANTR); /* Enable VLAN Hash Table filtering */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTHM_POS, MAC_VLANTR_VTHM_LEN, 1); /* Disable VLAN tag inverse matching */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTIM_POS, MAC_VLANTR_VTIM_LEN, 0); /* Only filter on the lower 12-bits of the VLAN tag */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ETV_POS, MAC_VLANTR_ETV_LEN, 1); /* In order for the VLAN Hash Table filtering to be effective, * the VLAN tag identifier in the VLAN Tag Register must not * be zero. Set the VLAN tag identifier to "1" to enable the * VLAN Hash Table filtering. This implies that a VLAN tag of * 1 will always pass filtering. */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS, MAC_VLANTR_VL_LEN, 1); writel(regval, pdata->mac_regs + MAC_VLANTR); return 0; } static int xlgmac_disable_rx_vlan_filtering(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MAC_PFR); /* Disable VLAN filtering */ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS, MAC_PFR_VTFE_LEN, 0); writel(regval, pdata->mac_regs + MAC_PFR); return 0; } static u32 xlgmac_vid_crc32_le(__le16 vid_le) { unsigned char *data = (unsigned char *)&vid_le; unsigned char data_byte = 0; u32 crc = ~0; u32 temp = 0; int i, bits; bits = get_bitmask_order(VLAN_VID_MASK); for (i = 0; i < bits; i++) { if ((i % 8) == 0) data_byte = data[i / 8]; temp = ((crc & 1) ^ data_byte) & 1; crc >>= 1; data_byte >>= 1; if (temp) crc ^= CRC32_POLY_LE; } return crc; } static int xlgmac_update_vlan_hash_table(struct xlgmac_pdata *pdata) { u16 vlan_hash_table = 0; __le16 vid_le; u32 regval; u32 crc; u16 vid; /* Generate the VLAN Hash Table value */ for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { /* Get the CRC32 value of the VLAN ID */ vid_le = cpu_to_le16(vid); crc = bitrev32(~xlgmac_vid_crc32_le(vid_le)) >> 28; vlan_hash_table |= (1 << crc); } regval = readl(pdata->mac_regs + MAC_VLANHTR); /* Set the VLAN Hash Table filtering register */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANHTR_VLHT_POS, MAC_VLANHTR_VLHT_LEN, vlan_hash_table); writel(regval, pdata->mac_regs + MAC_VLANHTR); return 0; } static int xlgmac_set_promiscuous_mode(struct xlgmac_pdata *pdata, unsigned int enable) { unsigned int val = enable ? 1 : 0; u32 regval; regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR), MAC_PFR_PR_POS, MAC_PFR_PR_LEN); if (regval == val) return 0; netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n", enable ? 
"entering" : "leaving"); regval = readl(pdata->mac_regs + MAC_PFR); regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PR_POS, MAC_PFR_PR_LEN, val); writel(regval, pdata->mac_regs + MAC_PFR); /* Hardware will still perform VLAN filtering in promiscuous mode */ if (enable) { xlgmac_disable_rx_vlan_filtering(pdata); } else { if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) xlgmac_enable_rx_vlan_filtering(pdata); } return 0; } static int xlgmac_set_all_multicast_mode(struct xlgmac_pdata *pdata, unsigned int enable) { unsigned int val = enable ? 1 : 0; u32 regval; regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR), MAC_PFR_PM_POS, MAC_PFR_PM_LEN); if (regval == val) return 0; netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n", enable ? "entering" : "leaving"); regval = readl(pdata->mac_regs + MAC_PFR); regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PM_POS, MAC_PFR_PM_LEN, val); writel(regval, pdata->mac_regs + MAC_PFR); return 0; } static void xlgmac_set_mac_addn_addrs(struct xlgmac_pdata *pdata) { struct net_device *netdev = pdata->netdev; struct netdev_hw_addr *ha; unsigned int addn_macs; unsigned int mac_reg; mac_reg = MAC_MACA1HR; addn_macs = pdata->hw_feat.addn_mac; if (netdev_uc_count(netdev) > addn_macs) { xlgmac_set_promiscuous_mode(pdata, 1); } else { netdev_for_each_uc_addr(ha, netdev) { xlgmac_set_mac_reg(pdata, ha, &mac_reg); addn_macs--; } if (netdev_mc_count(netdev) > addn_macs) { xlgmac_set_all_multicast_mode(pdata, 1); } else { netdev_for_each_mc_addr(ha, netdev) { xlgmac_set_mac_reg(pdata, ha, &mac_reg); addn_macs--; } } } /* Clear remaining additional MAC address entries */ while (addn_macs--) xlgmac_set_mac_reg(pdata, NULL, &mac_reg); } static void xlgmac_set_mac_hash_table(struct xlgmac_pdata *pdata) { unsigned int hash_table_shift, hash_table_count; u32 hash_table[XLGMAC_MAC_HASH_TABLE_SIZE]; struct net_device *netdev = pdata->netdev; struct netdev_hw_addr *ha; unsigned int hash_reg; unsigned int i; u32 crc; hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7); hash_table_count = pdata->hw_feat.hash_table_size / 32; memset(hash_table, 0, sizeof(hash_table)); /* Build the MAC Hash Table register values */ netdev_for_each_uc_addr(ha, netdev) { crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); crc >>= hash_table_shift; hash_table[crc >> 5] |= (1 << (crc & 0x1f)); } netdev_for_each_mc_addr(ha, netdev) { crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); crc >>= hash_table_shift; hash_table[crc >> 5] |= (1 << (crc & 0x1f)); } /* Set the MAC Hash Table registers */ hash_reg = MAC_HTR0; for (i = 0; i < hash_table_count; i++) { writel(hash_table[i], pdata->mac_regs + hash_reg); hash_reg += MAC_HTR_INC; } } static int xlgmac_add_mac_addresses(struct xlgmac_pdata *pdata) { if (pdata->hw_feat.hash_table_size) xlgmac_set_mac_hash_table(pdata); else xlgmac_set_mac_addn_addrs(pdata); return 0; } static void xlgmac_config_mac_address(struct xlgmac_pdata *pdata) { u32 regval; xlgmac_set_mac_address(pdata, pdata->netdev->dev_addr); /* Filtering is done using perfect filtering and hash filtering */ if (pdata->hw_feat.hash_table_size) { regval = readl(pdata->mac_regs + MAC_PFR); regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS, MAC_PFR_HPF_LEN, 1); regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS, MAC_PFR_HUC_LEN, 1); regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HMC_POS, MAC_PFR_HMC_LEN, 1); writel(regval, pdata->mac_regs + MAC_PFR); } } static void xlgmac_config_jumbo_enable(struct xlgmac_pdata *pdata) { unsigned int val; u32 regval; val = 
(pdata->netdev->mtu > XLGMAC_STD_PACKET_MTU) ? 1 : 0; regval = readl(pdata->mac_regs + MAC_RCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_JE_POS, MAC_RCR_JE_LEN, val); writel(regval, pdata->mac_regs + MAC_RCR); } static void xlgmac_config_checksum_offload(struct xlgmac_pdata *pdata) { if (pdata->netdev->features & NETIF_F_RXCSUM) xlgmac_enable_rx_csum(pdata); else xlgmac_disable_rx_csum(pdata); } static void xlgmac_config_vlan_support(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MAC_VLANIR); /* Indicate that VLAN Tx CTAGs come from context descriptors */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS, MAC_VLANIR_CSVL_LEN, 0); regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS, MAC_VLANIR_VLTI_LEN, 1); writel(regval, pdata->mac_regs + MAC_VLANIR); /* Set the current VLAN Hash Table register value */ xlgmac_update_vlan_hash_table(pdata); if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) xlgmac_enable_rx_vlan_filtering(pdata); else xlgmac_disable_rx_vlan_filtering(pdata); if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) xlgmac_enable_rx_vlan_stripping(pdata); else xlgmac_disable_rx_vlan_stripping(pdata); } static int xlgmac_config_rx_mode(struct xlgmac_pdata *pdata) { struct net_device *netdev = pdata->netdev; unsigned int pr_mode, am_mode; pr_mode = ((netdev->flags & IFF_PROMISC) != 0); am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); xlgmac_set_promiscuous_mode(pdata, pr_mode); xlgmac_set_all_multicast_mode(pdata, am_mode); xlgmac_add_mac_addresses(pdata); return 0; } static void xlgmac_prepare_tx_stop(struct xlgmac_pdata *pdata, struct xlgmac_channel *channel) { unsigned int tx_dsr, tx_pos, tx_qidx; unsigned long tx_timeout; unsigned int tx_status; /* Calculate the status register to read and the position within */ if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) { tx_dsr = DMA_DSR0; tx_pos = (channel->queue_index * DMA_DSR_Q_LEN) + DMA_DSR0_TPS_START; } else { tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE; tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_LEN) + DMA_DSRX_TPS_START; } /* The Tx engine cannot be stopped if it is actively processing * descriptors. Wait for the Tx engine to enter the stopped or * suspended state. Don't wait forever though... 
*/ tx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ); while (time_before(jiffies, tx_timeout)) { tx_status = readl(pdata->mac_regs + tx_dsr); tx_status = XLGMAC_GET_REG_BITS(tx_status, tx_pos, DMA_DSR_TPS_LEN); if ((tx_status == DMA_TPS_STOPPED) || (tx_status == DMA_TPS_SUSPENDED)) break; usleep_range(500, 1000); } if (!time_before(jiffies, tx_timeout)) netdev_info(pdata->netdev, "timed out waiting for Tx DMA channel %u to stop\n", channel->queue_index); } static void xlgmac_enable_tx(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; u32 regval; /* Enable each Tx DMA channel */ channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) break; regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS, DMA_CH_TCR_ST_LEN, 1); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); } /* Enable each Tx queue */ for (i = 0; i < pdata->tx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS, MTL_Q_TQOMR_TXQEN_LEN, MTL_Q_ENABLED); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); } /* Enable MAC Tx */ regval = readl(pdata->mac_regs + MAC_TCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS, MAC_TCR_TE_LEN, 1); writel(regval, pdata->mac_regs + MAC_TCR); } static void xlgmac_disable_tx(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; u32 regval; /* Prepare for Tx DMA channel stop */ channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) break; xlgmac_prepare_tx_stop(pdata, channel); } /* Disable MAC Tx */ regval = readl(pdata->mac_regs + MAC_TCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS, MAC_TCR_TE_LEN, 0); writel(regval, pdata->mac_regs + MAC_TCR); /* Disable each Tx queue */ for (i = 0; i < pdata->tx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS, MTL_Q_TQOMR_TXQEN_LEN, 0); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); } /* Disable each Tx DMA channel */ channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) break; regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS, DMA_CH_TCR_ST_LEN, 0); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); } } static void xlgmac_prepare_rx_stop(struct xlgmac_pdata *pdata, unsigned int queue) { unsigned int rx_status, prxq, rxqsts; unsigned long rx_timeout; /* The Rx engine cannot be stopped if it is actively processing * packets. Wait for the Rx queue to empty the Rx fifo. Don't * wait forever though... 
*/ rx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ); while (time_before(jiffies, rx_timeout)) { rx_status = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR)); prxq = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS, MTL_Q_RQDR_PRXQ_LEN); rxqsts = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_RXQSTS_POS, MTL_Q_RQDR_RXQSTS_LEN); if ((prxq == 0) && (rxqsts == 0)) break; usleep_range(500, 1000); } if (!time_before(jiffies, rx_timeout)) netdev_info(pdata->netdev, "timed out waiting for Rx queue %u to empty\n", queue); } static void xlgmac_enable_rx(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int regval, i; /* Enable each Rx DMA channel */ channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->rx_ring) break; regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, DMA_CH_RCR_SR_LEN, 1); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR)); } /* Enable each Rx queue */ regval = 0; for (i = 0; i < pdata->rx_q_count; i++) regval |= (0x02 << (i << 1)); writel(regval, pdata->mac_regs + MAC_RQC0R); /* Enable MAC Rx */ regval = readl(pdata->mac_regs + MAC_RCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS, MAC_RCR_DCRCC_LEN, 1); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS, MAC_RCR_CST_LEN, 1); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS, MAC_RCR_ACS_LEN, 1); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS, MAC_RCR_RE_LEN, 1); writel(regval, pdata->mac_regs + MAC_RCR); } static void xlgmac_disable_rx(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; u32 regval; /* Disable MAC Rx */ regval = readl(pdata->mac_regs + MAC_RCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS, MAC_RCR_DCRCC_LEN, 0); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS, MAC_RCR_CST_LEN, 0); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS, MAC_RCR_ACS_LEN, 0); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS, MAC_RCR_RE_LEN, 0); writel(regval, pdata->mac_regs + MAC_RCR); /* Prepare for Rx DMA channel stop */ for (i = 0; i < pdata->rx_q_count; i++) xlgmac_prepare_rx_stop(pdata, i); /* Disable each Rx queue */ writel(0, pdata->mac_regs + MAC_RQC0R); /* Disable each Rx DMA channel */ channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->rx_ring) break; regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, DMA_CH_RCR_SR_LEN, 0); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR)); } } static void xlgmac_tx_start_xmit(struct xlgmac_channel *channel, struct xlgmac_ring *ring) { struct xlgmac_pdata *pdata = channel->pdata; struct xlgmac_desc_data *desc_data; /* Make sure everything is written before the register write */ wmb(); /* Issue a poll command to Tx DMA by writing address * of next immediate free descriptor */ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur); writel(lower_32_bits(desc_data->dma_desc_addr), XLGMAC_DMA_REG(channel, DMA_CH_TDTR_LO)); /* Start the Tx timer */ if (pdata->tx_usecs && !channel->tx_timer_active) { channel->tx_timer_active = 1; mod_timer(&channel->tx_timer, jiffies + usecs_to_jiffies(pdata->tx_usecs)); } ring->tx.xmit_more = 0; } static void xlgmac_dev_xmit(struct xlgmac_channel *channel) { struct xlgmac_pdata *pdata = channel->pdata; struct xlgmac_ring *ring = channel->tx_ring; unsigned int tso_context, vlan_context; struct xlgmac_desc_data *desc_data; struct xlgmac_dma_desc *dma_desc; struct 
xlgmac_pkt_info *pkt_info; unsigned int csum, tso, vlan; int start_index = ring->cur; int cur_index = ring->cur; unsigned int tx_set_ic; int i; pkt_info = &ring->pkt_info; csum = XLGMAC_GET_REG_BITS(pkt_info->attributes, TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN); tso = XLGMAC_GET_REG_BITS(pkt_info->attributes, TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN); vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes, TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN); if (tso && (pkt_info->mss != ring->tx.cur_mss)) tso_context = 1; else tso_context = 0; if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)) vlan_context = 1; else vlan_context = 0; /* Determine if an interrupt should be generated for this Tx: * Interrupt: * - Tx frame count exceeds the frame count setting * - Addition of Tx frame count to the frame count since the * last interrupt was set exceeds the frame count setting * No interrupt: * - No frame count setting specified (ethtool -C ethX tx-frames 0) * - Addition of Tx frame count to the frame count since the * last interrupt was set does not exceed the frame count setting */ ring->coalesce_count += pkt_info->tx_packets; if (!pdata->tx_frames) tx_set_ic = 0; else if (pkt_info->tx_packets > pdata->tx_frames) tx_set_ic = 1; else if ((ring->coalesce_count % pdata->tx_frames) < pkt_info->tx_packets) tx_set_ic = 1; else tx_set_ic = 0; desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); dma_desc = desc_data->dma_desc; /* Create a context descriptor if this is a TSO pkt_info */ if (tso_context || vlan_context) { if (tso_context) { netif_dbg(pdata, tx_queued, pdata->netdev, "TSO context descriptor, mss=%u\n", pkt_info->mss); /* Set the MSS size */ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc2, TX_CONTEXT_DESC2_MSS_POS, TX_CONTEXT_DESC2_MSS_LEN, pkt_info->mss); /* Mark it as a CONTEXT descriptor */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_CONTEXT_DESC3_CTXT_POS, TX_CONTEXT_DESC3_CTXT_LEN, 1); /* Indicate this descriptor contains the MSS */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_CONTEXT_DESC3_TCMSSV_POS, TX_CONTEXT_DESC3_TCMSSV_LEN, 1); ring->tx.cur_mss = pkt_info->mss; } if (vlan_context) { netif_dbg(pdata, tx_queued, pdata->netdev, "VLAN context descriptor, ctag=%u\n", pkt_info->vlan_ctag); /* Mark it as a CONTEXT descriptor */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_CONTEXT_DESC3_CTXT_POS, TX_CONTEXT_DESC3_CTXT_LEN, 1); /* Set the VLAN tag */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_CONTEXT_DESC3_VT_POS, TX_CONTEXT_DESC3_VT_LEN, pkt_info->vlan_ctag); /* Indicate this descriptor contains the VLAN tag */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_CONTEXT_DESC3_VLTV_POS, TX_CONTEXT_DESC3_VLTV_LEN, 1); ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag; } cur_index++; desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); dma_desc = desc_data->dma_desc; } /* Update buffer address (for TSO this is the header) */ dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma)); dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma)); /* Update the buffer length */ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc2, TX_NORMAL_DESC2_HL_B1L_POS, TX_NORMAL_DESC2_HL_B1L_LEN, desc_data->skb_dma_len); /* VLAN tag insertion check */ if (vlan) { dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc2, TX_NORMAL_DESC2_VTIR_POS, TX_NORMAL_DESC2_VTIR_LEN, TX_NORMAL_DESC2_VLAN_INSERT); 
pdata->stats.tx_vlan_packets++; } /* Timestamp enablement check */ if (XLGMAC_GET_REG_BITS(pkt_info->attributes, TX_PACKET_ATTRIBUTES_PTP_POS, TX_PACKET_ATTRIBUTES_PTP_LEN)) dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc2, TX_NORMAL_DESC2_TTSE_POS, TX_NORMAL_DESC2_TTSE_LEN, 1); /* Mark it as First Descriptor */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_FD_POS, TX_NORMAL_DESC3_FD_LEN, 1); /* Mark it as a NORMAL descriptor */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_CTXT_POS, TX_NORMAL_DESC3_CTXT_LEN, 0); /* Set OWN bit if not the first descriptor */ if (cur_index != start_index) dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, TX_NORMAL_DESC3_OWN_LEN, 1); if (tso) { /* Enable TSO */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_TSE_POS, TX_NORMAL_DESC3_TSE_LEN, 1); dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_TCPPL_POS, TX_NORMAL_DESC3_TCPPL_LEN, pkt_info->tcp_payload_len); dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_TCPHDRLEN_POS, TX_NORMAL_DESC3_TCPHDRLEN_LEN, pkt_info->tcp_header_len / 4); pdata->stats.tx_tso_packets++; } else { /* Enable CRC and Pad Insertion */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_CPC_POS, TX_NORMAL_DESC3_CPC_LEN, 0); /* Enable HW CSUM */ if (csum) dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_CIC_POS, TX_NORMAL_DESC3_CIC_LEN, 0x3); /* Set the total length to be transmitted */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_FL_POS, TX_NORMAL_DESC3_FL_LEN, pkt_info->length); } for (i = cur_index - start_index + 1; i < pkt_info->desc_count; i++) { cur_index++; desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); dma_desc = desc_data->dma_desc; /* Update buffer address */ dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma)); dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma)); /* Update the buffer length */ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc2, TX_NORMAL_DESC2_HL_B1L_POS, TX_NORMAL_DESC2_HL_B1L_LEN, desc_data->skb_dma_len); /* Set OWN bit */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, TX_NORMAL_DESC3_OWN_LEN, 1); /* Mark it as NORMAL descriptor */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_CTXT_POS, TX_NORMAL_DESC3_CTXT_LEN, 0); /* Enable HW CSUM */ if (csum) dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_CIC_POS, TX_NORMAL_DESC3_CIC_LEN, 0x3); } /* Set LAST bit for the last descriptor */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_LD_POS, TX_NORMAL_DESC3_LD_LEN, 1); /* Set IC bit based on Tx coalescing settings */ if (tx_set_ic) dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc2, TX_NORMAL_DESC2_IC_POS, TX_NORMAL_DESC2_IC_LEN, 1); /* Save the Tx info to report back during cleanup */ desc_data->tx.packets = pkt_info->tx_packets; desc_data->tx.bytes = pkt_info->tx_bytes; /* In case the Tx DMA engine is running, make sure everything * is written to the descriptor(s) before setting the OWN bit * for the first descriptor */ dma_wmb(); /* Set OWN bit for the first descriptor */ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index); dma_desc = desc_data->dma_desc; dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, TX_NORMAL_DESC3_OWN_LEN, 1); if (netif_msg_tx_queued(pdata)) 
xlgmac_dump_tx_desc(pdata, ring, start_index, pkt_info->desc_count, 1); /* Make sure ownership is written to the descriptor */ smp_wmb(); ring->cur = cur_index + 1; if (!netdev_xmit_more() || netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev, channel->queue_index))) xlgmac_tx_start_xmit(channel, ring); else ring->tx.xmit_more = 1; XLGMAC_PR("%s: descriptors %u to %u written\n", channel->name, start_index & (ring->dma_desc_count - 1), (ring->cur - 1) & (ring->dma_desc_count - 1)); } static void xlgmac_get_rx_tstamp(struct xlgmac_pkt_info *pkt_info, struct xlgmac_dma_desc *dma_desc) { u32 tsa, tsd; u64 nsec; tsa = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_CONTEXT_DESC3_TSA_POS, RX_CONTEXT_DESC3_TSA_LEN); tsd = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_CONTEXT_DESC3_TSD_POS, RX_CONTEXT_DESC3_TSD_LEN); if (tsa && !tsd) { nsec = le32_to_cpu(dma_desc->desc1); nsec <<= 32; nsec |= le32_to_cpu(dma_desc->desc0); if (nsec != 0xffffffffffffffffULL) { pkt_info->rx_tstamp = nsec; pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS, RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN, 1); } } } static void xlgmac_tx_desc_reset(struct xlgmac_desc_data *desc_data) { struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc; /* Reset the Tx descriptor * Set buffer 1 (lo) address to zero * Set buffer 1 (hi) address to zero * Reset all other control bits (IC, TTSE, B2L & B1L) * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) */ dma_desc->desc0 = 0; dma_desc->desc1 = 0; dma_desc->desc2 = 0; dma_desc->desc3 = 0; /* Make sure ownership is written to the descriptor */ dma_wmb(); } static void xlgmac_tx_desc_init(struct xlgmac_channel *channel) { struct xlgmac_ring *ring = channel->tx_ring; struct xlgmac_desc_data *desc_data; int start_index = ring->cur; int i; /* Initialze all descriptors */ for (i = 0; i < ring->dma_desc_count; i++) { desc_data = XLGMAC_GET_DESC_DATA(ring, i); /* Initialize Tx descriptor */ xlgmac_tx_desc_reset(desc_data); } /* Update the total number of Tx descriptors */ writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_TDRLR)); /* Update the starting address of descriptor ring */ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index); writel(upper_32_bits(desc_data->dma_desc_addr), XLGMAC_DMA_REG(channel, DMA_CH_TDLR_HI)); writel(lower_32_bits(desc_data->dma_desc_addr), XLGMAC_DMA_REG(channel, DMA_CH_TDLR_LO)); } static void xlgmac_rx_desc_reset(struct xlgmac_pdata *pdata, struct xlgmac_desc_data *desc_data, unsigned int index) { struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc; unsigned int rx_frames = pdata->rx_frames; unsigned int rx_usecs = pdata->rx_usecs; dma_addr_t hdr_dma, buf_dma; unsigned int inte; if (!rx_usecs && !rx_frames) { /* No coalescing, interrupt for every descriptor */ inte = 1; } else { /* Set interrupt based on Rx frame coalescing setting */ if (rx_frames && !((index + 1) % rx_frames)) inte = 1; else inte = 0; } /* Reset the Rx descriptor * Set buffer 1 (lo) address to header dma address (lo) * Set buffer 1 (hi) address to header dma address (hi) * Set buffer 2 (lo) address to buffer dma address (lo) * Set buffer 2 (hi) address to buffer dma address (hi) and * set control bits OWN and INTE */ hdr_dma = desc_data->rx.hdr.dma_base + desc_data->rx.hdr.dma_off; buf_dma = desc_data->rx.buf.dma_base + desc_data->rx.buf.dma_off; dma_desc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma)); dma_desc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma)); dma_desc->desc2 = cpu_to_le32(lower_32_bits(buf_dma)); dma_desc->desc3 = 
cpu_to_le32(upper_32_bits(buf_dma)); dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, RX_NORMAL_DESC3_INTE_POS, RX_NORMAL_DESC3_INTE_LEN, inte); /* Since the Rx DMA engine is likely running, make sure everything * is written to the descriptor(s) before setting the OWN bit * for the descriptor */ dma_wmb(); dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, RX_NORMAL_DESC3_OWN_POS, RX_NORMAL_DESC3_OWN_LEN, 1); /* Make sure ownership is written to the descriptor */ dma_wmb(); } static void xlgmac_rx_desc_init(struct xlgmac_channel *channel) { struct xlgmac_pdata *pdata = channel->pdata; struct xlgmac_ring *ring = channel->rx_ring; unsigned int start_index = ring->cur; struct xlgmac_desc_data *desc_data; unsigned int i; /* Initialize all descriptors */ for (i = 0; i < ring->dma_desc_count; i++) { desc_data = XLGMAC_GET_DESC_DATA(ring, i); /* Initialize Rx descriptor */ xlgmac_rx_desc_reset(pdata, desc_data, i); } /* Update the total number of Rx descriptors */ writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_RDRLR)); /* Update the starting address of descriptor ring */ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index); writel(upper_32_bits(desc_data->dma_desc_addr), XLGMAC_DMA_REG(channel, DMA_CH_RDLR_HI)); writel(lower_32_bits(desc_data->dma_desc_addr), XLGMAC_DMA_REG(channel, DMA_CH_RDLR_LO)); /* Update the Rx Descriptor Tail Pointer */ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index + ring->dma_desc_count - 1); writel(lower_32_bits(desc_data->dma_desc_addr), XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO)); } static int xlgmac_is_context_desc(struct xlgmac_dma_desc *dma_desc) { /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */ return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, TX_NORMAL_DESC3_CTXT_POS, TX_NORMAL_DESC3_CTXT_LEN); } static int xlgmac_is_last_desc(struct xlgmac_dma_desc *dma_desc) { /* Rx and Tx share LD bit, so check TDES3.LD bit */ return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, TX_NORMAL_DESC3_LD_POS, TX_NORMAL_DESC3_LD_LEN); } static int xlgmac_disable_tx_flow_control(struct xlgmac_pdata *pdata) { unsigned int max_q_count, q_count; unsigned int reg, regval; unsigned int i; /* Clear MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS, MTL_Q_RQOMR_EHFC_LEN, 0); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); } /* Clear MAC flow control */ max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES; q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { regval = readl(pdata->mac_regs + reg); regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS, MAC_Q0TFCR_TFE_LEN, 0); writel(regval, pdata->mac_regs + reg); reg += MAC_QTFCR_INC; } return 0; } static int xlgmac_enable_tx_flow_control(struct xlgmac_pdata *pdata) { unsigned int max_q_count, q_count; unsigned int reg, regval; unsigned int i; /* Set MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS, MTL_Q_RQOMR_EHFC_LEN, 1); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); } /* Set MAC flow control */ max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES; q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { regval = readl(pdata->mac_regs + reg); /* Enable transmit flow control */ regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS, 
MAC_Q0TFCR_TFE_LEN, 1); /* Set pause time */ regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_PT_POS, MAC_Q0TFCR_PT_LEN, 0xffff); writel(regval, pdata->mac_regs + reg); reg += MAC_QTFCR_INC; } return 0; } static int xlgmac_disable_rx_flow_control(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MAC_RFCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS, MAC_RFCR_RFE_LEN, 0); writel(regval, pdata->mac_regs + MAC_RFCR); return 0; } static int xlgmac_enable_rx_flow_control(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MAC_RFCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS, MAC_RFCR_RFE_LEN, 1); writel(regval, pdata->mac_regs + MAC_RFCR); return 0; } static int xlgmac_config_tx_flow_control(struct xlgmac_pdata *pdata) { if (pdata->tx_pause) xlgmac_enable_tx_flow_control(pdata); else xlgmac_disable_tx_flow_control(pdata); return 0; } static int xlgmac_config_rx_flow_control(struct xlgmac_pdata *pdata) { if (pdata->rx_pause) xlgmac_enable_rx_flow_control(pdata); else xlgmac_disable_rx_flow_control(pdata); return 0; } static int xlgmac_config_rx_coalesce(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; u32 regval; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->rx_ring) break; regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RIWT)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RIWT_RWT_POS, DMA_CH_RIWT_RWT_LEN, pdata->rx_riwt); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RIWT)); } return 0; } static void xlgmac_config_flow_control(struct xlgmac_pdata *pdata) { xlgmac_config_tx_flow_control(pdata); xlgmac_config_rx_flow_control(pdata); } static void xlgmac_config_rx_fep_enable(struct xlgmac_pdata *pdata) { unsigned int i; u32 regval; for (i = 0; i < pdata->rx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FEP_POS, MTL_Q_RQOMR_FEP_LEN, 1); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); } } static void xlgmac_config_rx_fup_enable(struct xlgmac_pdata *pdata) { unsigned int i; u32 regval; for (i = 0; i < pdata->rx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FUP_POS, MTL_Q_RQOMR_FUP_LEN, 1); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); } } static int xlgmac_config_tx_coalesce(struct xlgmac_pdata *pdata) { return 0; } static void xlgmac_config_rx_buffer_size(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; u32 regval; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->rx_ring) break; regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_RBSZ_POS, DMA_CH_RCR_RBSZ_LEN, pdata->rx_buf_size); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR)); } } static void xlgmac_config_tso_mode(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; u32 regval; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) break; if (pdata->hw_feat.tso) { regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS, DMA_CH_TCR_TSE_LEN, 1); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); } } } static void xlgmac_config_sph_mode(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; u32 regval; channel = pdata->channel_head; for (i = 0; i < 
pdata->channel_count; i++, channel++) { if (!channel->rx_ring) break; regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_SPH_POS, DMA_CH_CR_SPH_LEN, 1); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR)); } regval = readl(pdata->mac_regs + MAC_RCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_HDSMS_POS, MAC_RCR_HDSMS_LEN, XLGMAC_SPH_HDSMS_SIZE); writel(regval, pdata->mac_regs + MAC_RCR); } static unsigned int xlgmac_usec_to_riwt(struct xlgmac_pdata *pdata, unsigned int usec) { unsigned long rate; unsigned int ret; rate = pdata->sysclk_rate; /* Convert the input usec value to the watchdog timer value. Each * watchdog timer value is equivalent to 256 clock cycles. * Calculate the required value as: * ( usec * ( system_clock_mhz / 10^6 ) / 256 */ ret = (usec * (rate / 1000000)) / 256; return ret; } static unsigned int xlgmac_riwt_to_usec(struct xlgmac_pdata *pdata, unsigned int riwt) { unsigned long rate; unsigned int ret; rate = pdata->sysclk_rate; /* Convert the input watchdog timer value to the usec value. Each * watchdog timer value is equivalent to 256 clock cycles. * Calculate the required value as: * ( riwt * 256 ) / ( system_clock_mhz / 10^6 ) */ ret = (riwt * 256) / (rate / 1000000); return ret; } static int xlgmac_config_rx_threshold(struct xlgmac_pdata *pdata, unsigned int val) { unsigned int i; u32 regval; for (i = 0; i < pdata->rx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RTC_POS, MTL_Q_RQOMR_RTC_LEN, val); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); } return 0; } static void xlgmac_config_mtl_mode(struct xlgmac_pdata *pdata) { unsigned int i; u32 regval; /* Set Tx to weighted round robin scheduling algorithm */ regval = readl(pdata->mac_regs + MTL_OMR); regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_ETSALG_POS, MTL_OMR_ETSALG_LEN, MTL_ETSALG_WRR); writel(regval, pdata->mac_regs + MTL_OMR); /* Set Tx traffic classes to use WRR algorithm with equal weights */ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_ETSCR_TSA_POS, MTL_TC_ETSCR_TSA_LEN, MTL_TSA_ETS); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR)); regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_QWR_QW_POS, MTL_TC_QWR_QW_LEN, 1); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR)); } /* Set Rx to strict priority algorithm */ regval = readl(pdata->mac_regs + MTL_OMR); regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_RAA_POS, MTL_OMR_RAA_LEN, MTL_RAA_SP); writel(regval, pdata->mac_regs + MTL_OMR); } static void xlgmac_config_queue_mapping(struct xlgmac_pdata *pdata) { unsigned int ppq, ppq_extra, prio, prio_queues; unsigned int qptc, qptc_extra, queue; unsigned int reg, regval; unsigned int mask; unsigned int i, j; /* Map the MTL Tx Queues to Traffic Classes * Note: Tx Queues >= Traffic Classes */ qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { for (j = 0; j < qptc; j++) { netif_dbg(pdata, drv, pdata->netdev, "TXq%u mapped to TC%u\n", queue, i); regval = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_TQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_Q2TCMAP_POS, MTL_Q_TQOMR_Q2TCMAP_LEN, i); writel(regval, XLGMAC_MTL_REG(pdata, queue, MTL_Q_TQOMR)); queue++; } if (i < qptc_extra) { netif_dbg(pdata, drv, pdata->netdev, "TXq%u 
mapped to TC%u\n", queue, i); regval = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_TQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_Q2TCMAP_POS, MTL_Q_TQOMR_Q2TCMAP_LEN, i); writel(regval, XLGMAC_MTL_REG(pdata, queue, MTL_Q_TQOMR)); queue++; } } /* Map the 8 VLAN priority values to available MTL Rx queues */ prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, pdata->rx_q_count); ppq = IEEE_8021QAZ_MAX_TCS / prio_queues; ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues; reg = MAC_RQC2R; regval = 0; for (i = 0, prio = 0; i < prio_queues;) { mask = 0; for (j = 0; j < ppq; j++) { netif_dbg(pdata, drv, pdata->netdev, "PRIO%u mapped to RXq%u\n", prio, i); mask |= (1 << prio); prio++; } if (i < ppq_extra) { netif_dbg(pdata, drv, pdata->netdev, "PRIO%u mapped to RXq%u\n", prio, i); mask |= (1 << prio); prio++; } regval |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3)); if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues)) continue; writel(regval, pdata->mac_regs + reg); reg += MAC_RQC2_INC; regval = 0; } /* Configure one to one, MTL Rx queue to DMA Rx channel mapping * ie Q0 <--> CH0, Q1 <--> CH1 ... Q11 <--> CH11 */ reg = MTL_RQDCM0R; regval = readl(pdata->mac_regs + reg); regval |= (MTL_RQDCM0R_Q0MDMACH | MTL_RQDCM0R_Q1MDMACH | MTL_RQDCM0R_Q2MDMACH | MTL_RQDCM0R_Q3MDMACH); writel(regval, pdata->mac_regs + reg); reg += MTL_RQDCM_INC; regval = readl(pdata->mac_regs + reg); regval |= (MTL_RQDCM1R_Q4MDMACH | MTL_RQDCM1R_Q5MDMACH | MTL_RQDCM1R_Q6MDMACH | MTL_RQDCM1R_Q7MDMACH); writel(regval, pdata->mac_regs + reg); reg += MTL_RQDCM_INC; regval = readl(pdata->mac_regs + reg); regval |= (MTL_RQDCM2R_Q8MDMACH | MTL_RQDCM2R_Q9MDMACH | MTL_RQDCM2R_Q10MDMACH | MTL_RQDCM2R_Q11MDMACH); writel(regval, pdata->mac_regs + reg); } static unsigned int xlgmac_calculate_per_queue_fifo( unsigned int fifo_size, unsigned int queue_count) { unsigned int q_fifo_size; unsigned int p_fifo; /* Calculate the configured fifo size */ q_fifo_size = 1 << (fifo_size + 7); /* The configured value may not be the actual amount of fifo RAM */ q_fifo_size = min_t(unsigned int, XLGMAC_MAX_FIFO, q_fifo_size); q_fifo_size = q_fifo_size / queue_count; /* Each increment in the queue fifo size represents 256 bytes of * fifo, with 0 representing 256 bytes. Distribute the fifo equally * between the queues. 
*/ p_fifo = q_fifo_size / 256; if (p_fifo) p_fifo--; return p_fifo; } static void xlgmac_config_tx_fifo_size(struct xlgmac_pdata *pdata) { unsigned int fifo_size; unsigned int i; u32 regval; fifo_size = xlgmac_calculate_per_queue_fifo( pdata->hw_feat.tx_fifo_size, pdata->tx_q_count); for (i = 0; i < pdata->tx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS, MTL_Q_TQOMR_TQS_LEN, fifo_size); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); } netif_info(pdata, drv, pdata->netdev, "%d Tx hardware queues, %d byte fifo per queue\n", pdata->tx_q_count, ((fifo_size + 1) * 256)); } static void xlgmac_config_rx_fifo_size(struct xlgmac_pdata *pdata) { unsigned int fifo_size; unsigned int i; u32 regval; fifo_size = xlgmac_calculate_per_queue_fifo( pdata->hw_feat.rx_fifo_size, pdata->rx_q_count); for (i = 0; i < pdata->rx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS, MTL_Q_RQOMR_RQS_LEN, fifo_size); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); } netif_info(pdata, drv, pdata->netdev, "%d Rx hardware queues, %d byte fifo per queue\n", pdata->rx_q_count, ((fifo_size + 1) * 256)); } static void xlgmac_config_flow_control_threshold(struct xlgmac_pdata *pdata) { unsigned int i; u32 regval; for (i = 0; i < pdata->rx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR)); /* Activate flow control when less than 4k left in fifo */ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFA_POS, MTL_Q_RQFCR_RFA_LEN, 2); /* De-activate flow control when more than 6k left in fifo */ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFD_POS, MTL_Q_RQFCR_RFD_LEN, 4); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR)); } } static int xlgmac_config_tx_threshold(struct xlgmac_pdata *pdata, unsigned int val) { unsigned int i; u32 regval; for (i = 0; i < pdata->tx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TTC_POS, MTL_Q_TQOMR_TTC_LEN, val); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); } return 0; } static int xlgmac_config_rsf_mode(struct xlgmac_pdata *pdata, unsigned int val) { unsigned int i; u32 regval; for (i = 0; i < pdata->rx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RSF_POS, MTL_Q_RQOMR_RSF_LEN, val); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); } return 0; } static int xlgmac_config_tsf_mode(struct xlgmac_pdata *pdata, unsigned int val) { unsigned int i; u32 regval; for (i = 0; i < pdata->tx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TSF_POS, MTL_Q_TQOMR_TSF_LEN, val); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); } return 0; } static int xlgmac_config_osp_mode(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; u32 regval; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) break; regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_OSP_POS, DMA_CH_TCR_OSP_LEN, pdata->tx_osp_mode); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); } return 0; } static int xlgmac_config_pblx8(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; u32 regval; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, 
channel++) { regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_PBLX8_POS, DMA_CH_CR_PBLX8_LEN, pdata->pblx8); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR)); } return 0; } static int xlgmac_get_tx_pbl_val(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_TCR)); regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_TCR_PBL_POS, DMA_CH_TCR_PBL_LEN); return regval; } static int xlgmac_config_tx_pbl_val(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; u32 regval; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) break; regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_PBL_POS, DMA_CH_TCR_PBL_LEN, pdata->tx_pbl); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); } return 0; } static int xlgmac_get_rx_pbl_val(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_RCR)); regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_RCR_PBL_POS, DMA_CH_RCR_PBL_LEN); return regval; } static int xlgmac_config_rx_pbl_val(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; u32 regval; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->rx_ring) break; regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_PBL_POS, DMA_CH_RCR_PBL_LEN, pdata->rx_pbl); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR)); } return 0; } static u64 xlgmac_mmc_read(struct xlgmac_pdata *pdata, unsigned int reg_lo) { bool read_hi; u64 val; switch (reg_lo) { /* These registers are always 64 bit */ case MMC_TXOCTETCOUNT_GB_LO: case MMC_TXOCTETCOUNT_G_LO: case MMC_RXOCTETCOUNT_GB_LO: case MMC_RXOCTETCOUNT_G_LO: read_hi = true; break; default: read_hi = false; } val = (u64)readl(pdata->mac_regs + reg_lo); if (read_hi) val |= ((u64)readl(pdata->mac_regs + reg_lo + 4) << 32); return val; } static void xlgmac_tx_mmc_int(struct xlgmac_pdata *pdata) { unsigned int mmc_isr = readl(pdata->mac_regs + MMC_TISR); struct xlgmac_stats *stats = &pdata->stats; if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXOCTETCOUNT_GB_POS, MMC_TISR_TXOCTETCOUNT_GB_LEN)) stats->txoctetcount_gb += xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXFRAMECOUNT_GB_POS, MMC_TISR_TXFRAMECOUNT_GB_LEN)) stats->txframecount_gb += xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXBROADCASTFRAMES_G_POS, MMC_TISR_TXBROADCASTFRAMES_G_LEN)) stats->txbroadcastframes_g += xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXMULTICASTFRAMES_G_POS, MMC_TISR_TXMULTICASTFRAMES_G_LEN)) stats->txmulticastframes_g += xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX64OCTETS_GB_POS, MMC_TISR_TX64OCTETS_GB_LEN)) stats->tx64octets_gb += xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX65TO127OCTETS_GB_POS, MMC_TISR_TX65TO127OCTETS_GB_LEN)) stats->tx65to127octets_gb += xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX128TO255OCTETS_GB_POS, MMC_TISR_TX128TO255OCTETS_GB_LEN)) stats->tx128to255octets_gb += xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX256TO511OCTETS_GB_POS, MMC_TISR_TX256TO511OCTETS_GB_LEN)) 
stats->tx256to511octets_gb += xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX512TO1023OCTETS_GB_POS, MMC_TISR_TX512TO1023OCTETS_GB_LEN)) stats->tx512to1023octets_gb += xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX1024TOMAXOCTETS_GB_POS, MMC_TISR_TX1024TOMAXOCTETS_GB_LEN)) stats->tx1024tomaxoctets_gb += xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXUNICASTFRAMES_GB_POS, MMC_TISR_TXUNICASTFRAMES_GB_LEN)) stats->txunicastframes_gb += xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXMULTICASTFRAMES_GB_POS, MMC_TISR_TXMULTICASTFRAMES_GB_LEN)) stats->txmulticastframes_gb += xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXBROADCASTFRAMES_GB_POS, MMC_TISR_TXBROADCASTFRAMES_GB_LEN)) stats->txbroadcastframes_g += xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXUNDERFLOWERROR_POS, MMC_TISR_TXUNDERFLOWERROR_LEN)) stats->txunderflowerror += xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXOCTETCOUNT_G_POS, MMC_TISR_TXOCTETCOUNT_G_LEN)) stats->txoctetcount_g += xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXFRAMECOUNT_G_POS, MMC_TISR_TXFRAMECOUNT_G_LEN)) stats->txframecount_g += xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXPAUSEFRAMES_POS, MMC_TISR_TXPAUSEFRAMES_LEN)) stats->txpauseframes += xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXVLANFRAMES_G_POS, MMC_TISR_TXVLANFRAMES_G_LEN)) stats->txvlanframes_g += xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); } static void xlgmac_rx_mmc_int(struct xlgmac_pdata *pdata) { unsigned int mmc_isr = readl(pdata->mac_regs + MMC_RISR); struct xlgmac_stats *stats = &pdata->stats; if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXFRAMECOUNT_GB_POS, MMC_RISR_RXFRAMECOUNT_GB_LEN)) stats->rxframecount_gb += xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXOCTETCOUNT_GB_POS, MMC_RISR_RXOCTETCOUNT_GB_LEN)) stats->rxoctetcount_gb += xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXOCTETCOUNT_G_POS, MMC_RISR_RXOCTETCOUNT_G_LEN)) stats->rxoctetcount_g += xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXBROADCASTFRAMES_G_POS, MMC_RISR_RXBROADCASTFRAMES_G_LEN)) stats->rxbroadcastframes_g += xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXMULTICASTFRAMES_G_POS, MMC_RISR_RXMULTICASTFRAMES_G_LEN)) stats->rxmulticastframes_g += xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXCRCERROR_POS, MMC_RISR_RXCRCERROR_LEN)) stats->rxcrcerror += xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXRUNTERROR_POS, MMC_RISR_RXRUNTERROR_LEN)) stats->rxrunterror += xlgmac_mmc_read(pdata, MMC_RXRUNTERROR); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXJABBERERROR_POS, MMC_RISR_RXJABBERERROR_LEN)) stats->rxjabbererror += xlgmac_mmc_read(pdata, MMC_RXJABBERERROR); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXUNDERSIZE_G_POS, MMC_RISR_RXUNDERSIZE_G_LEN)) stats->rxundersize_g += xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXOVERSIZE_G_POS, 
MMC_RISR_RXOVERSIZE_G_LEN)) stats->rxoversize_g += xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX64OCTETS_GB_POS, MMC_RISR_RX64OCTETS_GB_LEN)) stats->rx64octets_gb += xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX65TO127OCTETS_GB_POS, MMC_RISR_RX65TO127OCTETS_GB_LEN)) stats->rx65to127octets_gb += xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX128TO255OCTETS_GB_POS, MMC_RISR_RX128TO255OCTETS_GB_LEN)) stats->rx128to255octets_gb += xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX256TO511OCTETS_GB_POS, MMC_RISR_RX256TO511OCTETS_GB_LEN)) stats->rx256to511octets_gb += xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX512TO1023OCTETS_GB_POS, MMC_RISR_RX512TO1023OCTETS_GB_LEN)) stats->rx512to1023octets_gb += xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX1024TOMAXOCTETS_GB_POS, MMC_RISR_RX1024TOMAXOCTETS_GB_LEN)) stats->rx1024tomaxoctets_gb += xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXUNICASTFRAMES_G_POS, MMC_RISR_RXUNICASTFRAMES_G_LEN)) stats->rxunicastframes_g += xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXLENGTHERROR_POS, MMC_RISR_RXLENGTHERROR_LEN)) stats->rxlengtherror += xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXOUTOFRANGETYPE_POS, MMC_RISR_RXOUTOFRANGETYPE_LEN)) stats->rxoutofrangetype += xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXPAUSEFRAMES_POS, MMC_RISR_RXPAUSEFRAMES_LEN)) stats->rxpauseframes += xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXFIFOOVERFLOW_POS, MMC_RISR_RXFIFOOVERFLOW_LEN)) stats->rxfifooverflow += xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXVLANFRAMES_GB_POS, MMC_RISR_RXVLANFRAMES_GB_LEN)) stats->rxvlanframes_gb += xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXWATCHDOGERROR_POS, MMC_RISR_RXWATCHDOGERROR_LEN)) stats->rxwatchdogerror += xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR); } static void xlgmac_read_mmc_stats(struct xlgmac_pdata *pdata) { struct xlgmac_stats *stats = &pdata->stats; u32 regval; /* Freeze counters */ regval = readl(pdata->mac_regs + MMC_CR); regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS, MMC_CR_MCF_LEN, 1); writel(regval, pdata->mac_regs + MMC_CR); stats->txoctetcount_gb += xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); stats->txframecount_gb += xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); stats->txbroadcastframes_g += xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); stats->txmulticastframes_g += xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); stats->tx64octets_gb += xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); stats->tx65to127octets_gb += xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); stats->tx128to255octets_gb += xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); stats->tx256to511octets_gb += xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); stats->tx512to1023octets_gb += xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); stats->tx1024tomaxoctets_gb += xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); stats->txunicastframes_gb += xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); stats->txmulticastframes_gb += 
xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); stats->txbroadcastframes_g += xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); stats->txunderflowerror += xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); stats->txoctetcount_g += xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); stats->txframecount_g += xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); stats->txpauseframes += xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); stats->txvlanframes_g += xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); stats->rxframecount_gb += xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); stats->rxoctetcount_gb += xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); stats->rxoctetcount_g += xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); stats->rxbroadcastframes_g += xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); stats->rxmulticastframes_g += xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); stats->rxcrcerror += xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO); stats->rxrunterror += xlgmac_mmc_read(pdata, MMC_RXRUNTERROR); stats->rxjabbererror += xlgmac_mmc_read(pdata, MMC_RXJABBERERROR); stats->rxundersize_g += xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G); stats->rxoversize_g += xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G); stats->rx64octets_gb += xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); stats->rx65to127octets_gb += xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); stats->rx128to255octets_gb += xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); stats->rx256to511octets_gb += xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); stats->rx512to1023octets_gb += xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); stats->rx1024tomaxoctets_gb += xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); stats->rxunicastframes_g += xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); stats->rxlengtherror += xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO); stats->rxoutofrangetype += xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); stats->rxpauseframes += xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); stats->rxfifooverflow += xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); stats->rxvlanframes_gb += xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); stats->rxwatchdogerror += xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR); /* Un-freeze counters */ regval = readl(pdata->mac_regs + MMC_CR); regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS, MMC_CR_MCF_LEN, 0); writel(regval, pdata->mac_regs + MMC_CR); } static void xlgmac_config_mmc(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MMC_CR); /* Set counters to reset on read */ regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_ROR_POS, MMC_CR_ROR_LEN, 1); /* Reset the counters */ regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS, MMC_CR_CR_LEN, 1); writel(regval, pdata->mac_regs + MMC_CR); } static int xlgmac_write_rss_reg(struct xlgmac_pdata *pdata, unsigned int type, unsigned int index, unsigned int val) { unsigned int wait; int ret = 0; u32 regval; mutex_lock(&pdata->rss_mutex); regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR), MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN); if (regval) { ret = -EBUSY; goto unlock; } writel(val, pdata->mac_regs + MAC_RSSDR); regval = readl(pdata->mac_regs + MAC_RSSAR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_RSSIA_POS, MAC_RSSAR_RSSIA_LEN, index); regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_ADDRT_POS, MAC_RSSAR_ADDRT_LEN, type); regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_CT_POS, MAC_RSSAR_CT_LEN, 0); regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN, 1); writel(regval, pdata->mac_regs + MAC_RSSAR); wait = 
1000; while (wait--) { regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR), MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN); if (!regval) goto unlock; usleep_range(1000, 1500); } ret = -EBUSY; unlock: mutex_unlock(&pdata->rss_mutex); return ret; } static int xlgmac_write_rss_hash_key(struct xlgmac_pdata *pdata) { unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); unsigned int *key = (unsigned int *)&pdata->rss_key; int ret; while (key_regs--) { ret = xlgmac_write_rss_reg(pdata, XLGMAC_RSS_HASH_KEY_TYPE, key_regs, *key++); if (ret) return ret; } return 0; } static int xlgmac_write_rss_lookup_table(struct xlgmac_pdata *pdata) { unsigned int i; int ret; for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { ret = xlgmac_write_rss_reg(pdata, XLGMAC_RSS_LOOKUP_TABLE_TYPE, i, pdata->rss_table[i]); if (ret) return ret; } return 0; } static int xlgmac_set_rss_hash_key(struct xlgmac_pdata *pdata, const u8 *key) { memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); return xlgmac_write_rss_hash_key(pdata); } static int xlgmac_set_rss_lookup_table(struct xlgmac_pdata *pdata, const u32 *table) { unsigned int i; u32 tval; for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { tval = table[i]; pdata->rss_table[i] = XLGMAC_SET_REG_BITS( pdata->rss_table[i], MAC_RSSDR_DMCH_POS, MAC_RSSDR_DMCH_LEN, tval); } return xlgmac_write_rss_lookup_table(pdata); } static int xlgmac_enable_rss(struct xlgmac_pdata *pdata) { u32 regval; int ret; if (!pdata->hw_feat.rss) return -EOPNOTSUPP; /* Program the hash key */ ret = xlgmac_write_rss_hash_key(pdata); if (ret) return ret; /* Program the lookup table */ ret = xlgmac_write_rss_lookup_table(pdata); if (ret) return ret; /* Set the RSS options */ writel(pdata->rss_options, pdata->mac_regs + MAC_RSSCR); /* Enable RSS */ regval = readl(pdata->mac_regs + MAC_RSSCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS, MAC_RSSCR_RSSE_LEN, 1); writel(regval, pdata->mac_regs + MAC_RSSCR); return 0; } static int xlgmac_disable_rss(struct xlgmac_pdata *pdata) { u32 regval; if (!pdata->hw_feat.rss) return -EOPNOTSUPP; regval = readl(pdata->mac_regs + MAC_RSSCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS, MAC_RSSCR_RSSE_LEN, 0); writel(regval, pdata->mac_regs + MAC_RSSCR); return 0; } static void xlgmac_config_rss(struct xlgmac_pdata *pdata) { int ret; if (!pdata->hw_feat.rss) return; if (pdata->netdev->features & NETIF_F_RXHASH) ret = xlgmac_enable_rss(pdata); else ret = xlgmac_disable_rss(pdata); if (ret) netdev_err(pdata->netdev, "error configuring RSS, RSS disabled\n"); } static void xlgmac_enable_dma_interrupts(struct xlgmac_pdata *pdata) { unsigned int dma_ch_isr, dma_ch_ier; struct xlgmac_channel *channel; unsigned int i; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { /* Clear all the interrupts which are set */ dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR)); writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR)); /* Clear all interrupt enable bits */ dma_ch_ier = 0; /* Enable following interrupts * NIE - Normal Interrupt Summary Enable * AIE - Abnormal Interrupt Summary Enable * FBEE - Fatal Bus Error Enable */ dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_NIE_POS, DMA_CH_IER_NIE_LEN, 1); dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_AIE_POS, DMA_CH_IER_AIE_LEN, 1); dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_FBEE_POS, DMA_CH_IER_FBEE_LEN, 1); if (channel->tx_ring) { /* Enable the following Tx interrupts * TIE - Transmit Interrupt Enable (unless using * per channel 
interrupts) */ if (!pdata->per_channel_irq) dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TIE_POS, DMA_CH_IER_TIE_LEN, 1); } if (channel->rx_ring) { /* Enable following Rx interrupts * RBUE - Receive Buffer Unavailable Enable * RIE - Receive Interrupt Enable (unless using * per channel interrupts) */ dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RBUE_POS, DMA_CH_IER_RBUE_LEN, 1); if (!pdata->per_channel_irq) dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RIE_POS, DMA_CH_IER_RIE_LEN, 1); } writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_IER)); } } static void xlgmac_enable_mtl_interrupts(struct xlgmac_pdata *pdata) { unsigned int q_count, i; unsigned int mtl_q_isr; q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); for (i = 0; i < q_count; i++) { /* Clear all the interrupts which are set */ mtl_q_isr = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); writel(mtl_q_isr, XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); /* No MTL interrupts to be enabled */ writel(0, XLGMAC_MTL_REG(pdata, i, MTL_Q_IER)); } } static void xlgmac_enable_mac_interrupts(struct xlgmac_pdata *pdata) { unsigned int mac_ier = 0; u32 regval; /* Enable Timestamp interrupt */ mac_ier = XLGMAC_SET_REG_BITS(mac_ier, MAC_IER_TSIE_POS, MAC_IER_TSIE_LEN, 1); writel(mac_ier, pdata->mac_regs + MAC_IER); /* Enable all counter interrupts */ regval = readl(pdata->mac_regs + MMC_RIER); regval = XLGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS, MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff); writel(regval, pdata->mac_regs + MMC_RIER); regval = readl(pdata->mac_regs + MMC_TIER); regval = XLGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS, MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff); writel(regval, pdata->mac_regs + MMC_TIER); } static int xlgmac_set_xlgmii_25000_speed(struct xlgmac_pdata *pdata) { u32 regval; regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), MAC_TCR_SS_POS, MAC_TCR_SS_LEN); if (regval == 0x1) return 0; regval = readl(pdata->mac_regs + MAC_TCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, MAC_TCR_SS_LEN, 0x1); writel(regval, pdata->mac_regs + MAC_TCR); return 0; } static int xlgmac_set_xlgmii_40000_speed(struct xlgmac_pdata *pdata) { u32 regval; regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), MAC_TCR_SS_POS, MAC_TCR_SS_LEN); if (regval == 0) return 0; regval = readl(pdata->mac_regs + MAC_TCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, MAC_TCR_SS_LEN, 0); writel(regval, pdata->mac_regs + MAC_TCR); return 0; } static int xlgmac_set_xlgmii_50000_speed(struct xlgmac_pdata *pdata) { u32 regval; regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), MAC_TCR_SS_POS, MAC_TCR_SS_LEN); if (regval == 0x2) return 0; regval = readl(pdata->mac_regs + MAC_TCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, MAC_TCR_SS_LEN, 0x2); writel(regval, pdata->mac_regs + MAC_TCR); return 0; } static int xlgmac_set_xlgmii_100000_speed(struct xlgmac_pdata *pdata) { u32 regval; regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), MAC_TCR_SS_POS, MAC_TCR_SS_LEN); if (regval == 0x3) return 0; regval = readl(pdata->mac_regs + MAC_TCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, MAC_TCR_SS_LEN, 0x3); writel(regval, pdata->mac_regs + MAC_TCR); return 0; } static void xlgmac_config_mac_speed(struct xlgmac_pdata *pdata) { switch (pdata->phy_speed) { case SPEED_100000: xlgmac_set_xlgmii_100000_speed(pdata); break; case SPEED_50000: xlgmac_set_xlgmii_50000_speed(pdata); break; case SPEED_40000: xlgmac_set_xlgmii_40000_speed(pdata); break; 
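	/* MAC_TCR SS field encodings used by the xlgmac_set_xlgmii_*_speed()
	 * helpers above (as visible in those functions): 0x0 = 40G,
	 * 0x1 = 25G, 0x2 = 50G, 0x3 = 100G.
	 */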
case SPEED_25000: xlgmac_set_xlgmii_25000_speed(pdata); break; } } static int xlgmac_dev_read(struct xlgmac_channel *channel) { struct xlgmac_pdata *pdata = channel->pdata; struct xlgmac_ring *ring = channel->rx_ring; struct net_device *netdev = pdata->netdev; struct xlgmac_desc_data *desc_data; struct xlgmac_dma_desc *dma_desc; struct xlgmac_pkt_info *pkt_info; unsigned int err, etlt, l34t; desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur); dma_desc = desc_data->dma_desc; pkt_info = &ring->pkt_info; /* Check for data availability */ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_OWN_POS, RX_NORMAL_DESC3_OWN_LEN)) return 1; /* Make sure descriptor fields are read after reading the OWN bit */ dma_rmb(); if (netif_msg_rx_status(pdata)) xlgmac_dump_rx_desc(pdata, ring, ring->cur); if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_CTXT_POS, RX_NORMAL_DESC3_CTXT_LEN)) { /* Timestamp Context Descriptor */ xlgmac_get_rx_tstamp(pkt_info, dma_desc); pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_POS, RX_PACKET_ATTRIBUTES_CONTEXT_LEN, 1); pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS, RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN, 0); return 0; } /* Normal Descriptor, be sure Context Descriptor bit is off */ pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_POS, RX_PACKET_ATTRIBUTES_CONTEXT_LEN, 0); /* Indicate if a Context Descriptor is next */ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_CDA_POS, RX_NORMAL_DESC3_CDA_LEN)) pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS, RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN, 1); /* Get the header length */ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_FD_POS, RX_NORMAL_DESC3_FD_LEN)) { desc_data->rx.hdr_len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc2, RX_NORMAL_DESC2_HL_POS, RX_NORMAL_DESC2_HL_LEN); if (desc_data->rx.hdr_len) pdata->stats.rx_split_header_packets++; } /* Get the RSS hash */ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_RSV_POS, RX_NORMAL_DESC3_RSV_LEN)) { pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_RSS_HASH_POS, RX_PACKET_ATTRIBUTES_RSS_HASH_LEN, 1); pkt_info->rss_hash = le32_to_cpu(dma_desc->desc1); l34t = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_L34T_POS, RX_NORMAL_DESC3_L34T_LEN); switch (l34t) { case RX_DESC3_L34T_IPV4_TCP: case RX_DESC3_L34T_IPV4_UDP: case RX_DESC3_L34T_IPV6_TCP: case RX_DESC3_L34T_IPV6_UDP: pkt_info->rss_hash_type = PKT_HASH_TYPE_L4; break; default: pkt_info->rss_hash_type = PKT_HASH_TYPE_L3; } } /* Get the pkt_info length */ desc_data->rx.len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_PL_POS, RX_NORMAL_DESC3_PL_LEN); if (!XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_LD_POS, RX_NORMAL_DESC3_LD_LEN)) { /* Not all the data has been transferred for this pkt_info */ pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, 1); return 0; } /* This is the last of the data for this pkt_info */ pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, 0); /* Set checksum done indicator as appropriate */ if (netdev->features & NETIF_F_RXCSUM) pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, 
RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, 1); /* Check for errors (only valid in last descriptor) */ err = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_ES_POS, RX_NORMAL_DESC3_ES_LEN); etlt = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_ETLT_POS, RX_NORMAL_DESC3_ETLT_LEN); netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt); if (!err || !etlt) { /* No error if err is 0 or etlt is 0 */ if ((etlt == 0x09) && (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN, 1); pkt_info->vlan_ctag = XLGMAC_GET_REG_BITS_LE(dma_desc->desc0, RX_NORMAL_DESC0_OVT_POS, RX_NORMAL_DESC0_OVT_LEN); netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n", pkt_info->vlan_ctag); } } else { if ((etlt == 0x05) || (etlt == 0x06)) pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, 0); else pkt_info->errors = XLGMAC_SET_REG_BITS( pkt_info->errors, RX_PACKET_ERRORS_FRAME_POS, RX_PACKET_ERRORS_FRAME_LEN, 1); } XLGMAC_PR("%s - descriptor=%u (cur=%d)\n", channel->name, ring->cur & (ring->dma_desc_count - 1), ring->cur); return 0; } static int xlgmac_enable_int(struct xlgmac_channel *channel, enum xlgmac_int int_id) { unsigned int dma_ch_ier; dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER)); switch (int_id) { case XLGMAC_INT_DMA_CH_SR_TI: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TIE_POS, DMA_CH_IER_TIE_LEN, 1); break; case XLGMAC_INT_DMA_CH_SR_TPS: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TXSE_POS, DMA_CH_IER_TXSE_LEN, 1); break; case XLGMAC_INT_DMA_CH_SR_TBU: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TBUE_POS, DMA_CH_IER_TBUE_LEN, 1); break; case XLGMAC_INT_DMA_CH_SR_RI: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RIE_POS, DMA_CH_IER_RIE_LEN, 1); break; case XLGMAC_INT_DMA_CH_SR_RBU: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RBUE_POS, DMA_CH_IER_RBUE_LEN, 1); break; case XLGMAC_INT_DMA_CH_SR_RPS: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RSE_POS, DMA_CH_IER_RSE_LEN, 1); break; case XLGMAC_INT_DMA_CH_SR_TI_RI: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TIE_POS, DMA_CH_IER_TIE_LEN, 1); dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RIE_POS, DMA_CH_IER_RIE_LEN, 1); break; case XLGMAC_INT_DMA_CH_SR_FBE: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_FBEE_POS, DMA_CH_IER_FBEE_LEN, 1); break; case XLGMAC_INT_DMA_ALL: dma_ch_ier |= channel->saved_ier; break; default: return -1; } writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER)); return 0; } static int xlgmac_disable_int(struct xlgmac_channel *channel, enum xlgmac_int int_id) { unsigned int dma_ch_ier; dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER)); switch (int_id) { case XLGMAC_INT_DMA_CH_SR_TI: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TIE_POS, DMA_CH_IER_TIE_LEN, 0); break; case XLGMAC_INT_DMA_CH_SR_TPS: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TXSE_POS, DMA_CH_IER_TXSE_LEN, 0); break; case XLGMAC_INT_DMA_CH_SR_TBU: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TBUE_POS, DMA_CH_IER_TBUE_LEN, 0); break; case XLGMAC_INT_DMA_CH_SR_RI: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RIE_POS, DMA_CH_IER_RIE_LEN, 0); break; case XLGMAC_INT_DMA_CH_SR_RBU: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, 
DMA_CH_IER_RBUE_POS, DMA_CH_IER_RBUE_LEN, 0); break; case XLGMAC_INT_DMA_CH_SR_RPS: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RSE_POS, DMA_CH_IER_RSE_LEN, 0); break; case XLGMAC_INT_DMA_CH_SR_TI_RI: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TIE_POS, DMA_CH_IER_TIE_LEN, 0); dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RIE_POS, DMA_CH_IER_RIE_LEN, 0); break; case XLGMAC_INT_DMA_CH_SR_FBE: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_FBEE_POS, DMA_CH_IER_FBEE_LEN, 0); break; case XLGMAC_INT_DMA_ALL: channel->saved_ier = dma_ch_ier & XLGMAC_DMA_INTERRUPT_MASK; dma_ch_ier &= ~XLGMAC_DMA_INTERRUPT_MASK; break; default: return -1; } writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER)); return 0; } static int xlgmac_flush_tx_queues(struct xlgmac_pdata *pdata) { unsigned int i, count; u32 regval; for (i = 0; i < pdata->tx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS, MTL_Q_TQOMR_FTQ_LEN, 1); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); } /* Poll Until Poll Condition */ for (i = 0; i < pdata->tx_q_count; i++) { count = 2000; regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); regval = XLGMAC_GET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS, MTL_Q_TQOMR_FTQ_LEN); while (--count && regval) usleep_range(500, 600); if (!count) return -EBUSY; } return 0; } static void xlgmac_config_dma_bus(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + DMA_SBMR); /* Set enhanced addressing mode */ regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS, DMA_SBMR_EAME_LEN, 1); /* Set the System Bus mode */ regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_UNDEF_POS, DMA_SBMR_UNDEF_LEN, 1); regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_256_POS, DMA_SBMR_BLEN_256_LEN, 1); writel(regval, pdata->mac_regs + DMA_SBMR); } static int xlgmac_hw_init(struct xlgmac_pdata *pdata) { struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops; int ret; /* Flush Tx queues */ ret = xlgmac_flush_tx_queues(pdata); if (ret) return ret; /* Initialize DMA related features */ xlgmac_config_dma_bus(pdata); xlgmac_config_osp_mode(pdata); xlgmac_config_pblx8(pdata); xlgmac_config_tx_pbl_val(pdata); xlgmac_config_rx_pbl_val(pdata); xlgmac_config_rx_coalesce(pdata); xlgmac_config_tx_coalesce(pdata); xlgmac_config_rx_buffer_size(pdata); xlgmac_config_tso_mode(pdata); xlgmac_config_sph_mode(pdata); xlgmac_config_rss(pdata); desc_ops->tx_desc_init(pdata); desc_ops->rx_desc_init(pdata); xlgmac_enable_dma_interrupts(pdata); /* Initialize MTL related features */ xlgmac_config_mtl_mode(pdata); xlgmac_config_queue_mapping(pdata); xlgmac_config_tsf_mode(pdata, pdata->tx_sf_mode); xlgmac_config_rsf_mode(pdata, pdata->rx_sf_mode); xlgmac_config_tx_threshold(pdata, pdata->tx_threshold); xlgmac_config_rx_threshold(pdata, pdata->rx_threshold); xlgmac_config_tx_fifo_size(pdata); xlgmac_config_rx_fifo_size(pdata); xlgmac_config_flow_control_threshold(pdata); xlgmac_config_rx_fep_enable(pdata); xlgmac_config_rx_fup_enable(pdata); xlgmac_enable_mtl_interrupts(pdata); /* Initialize MAC related features */ xlgmac_config_mac_address(pdata); xlgmac_config_rx_mode(pdata); xlgmac_config_jumbo_enable(pdata); xlgmac_config_flow_control(pdata); xlgmac_config_mac_speed(pdata); xlgmac_config_checksum_offload(pdata); xlgmac_config_vlan_support(pdata); xlgmac_config_mmc(pdata); xlgmac_enable_mac_interrupts(pdata); return 0; } static int xlgmac_hw_exit(struct xlgmac_pdata *pdata) { unsigned int count = 2000; u32 
regval; /* Issue a software reset */ regval = readl(pdata->mac_regs + DMA_MR); regval = XLGMAC_SET_REG_BITS(regval, DMA_MR_SWR_POS, DMA_MR_SWR_LEN, 1); writel(regval, pdata->mac_regs + DMA_MR); usleep_range(10, 15); /* Poll Until Poll Condition */ while (--count && XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + DMA_MR), DMA_MR_SWR_POS, DMA_MR_SWR_LEN)) usleep_range(500, 600); if (!count) return -EBUSY; return 0; } void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops) { hw_ops->init = xlgmac_hw_init; hw_ops->exit = xlgmac_hw_exit; hw_ops->tx_complete = xlgmac_tx_complete; hw_ops->enable_tx = xlgmac_enable_tx; hw_ops->disable_tx = xlgmac_disable_tx; hw_ops->enable_rx = xlgmac_enable_rx; hw_ops->disable_rx = xlgmac_disable_rx; hw_ops->dev_xmit = xlgmac_dev_xmit; hw_ops->dev_read = xlgmac_dev_read; hw_ops->enable_int = xlgmac_enable_int; hw_ops->disable_int = xlgmac_disable_int; hw_ops->set_mac_address = xlgmac_set_mac_address; hw_ops->config_rx_mode = xlgmac_config_rx_mode; hw_ops->enable_rx_csum = xlgmac_enable_rx_csum; hw_ops->disable_rx_csum = xlgmac_disable_rx_csum; /* For MII speed configuration */ hw_ops->set_xlgmii_25000_speed = xlgmac_set_xlgmii_25000_speed; hw_ops->set_xlgmii_40000_speed = xlgmac_set_xlgmii_40000_speed; hw_ops->set_xlgmii_50000_speed = xlgmac_set_xlgmii_50000_speed; hw_ops->set_xlgmii_100000_speed = xlgmac_set_xlgmii_100000_speed; /* For descriptor related operation */ hw_ops->tx_desc_init = xlgmac_tx_desc_init; hw_ops->rx_desc_init = xlgmac_rx_desc_init; hw_ops->tx_desc_reset = xlgmac_tx_desc_reset; hw_ops->rx_desc_reset = xlgmac_rx_desc_reset; hw_ops->is_last_desc = xlgmac_is_last_desc; hw_ops->is_context_desc = xlgmac_is_context_desc; hw_ops->tx_start_xmit = xlgmac_tx_start_xmit; /* For Flow Control */ hw_ops->config_tx_flow_control = xlgmac_config_tx_flow_control; hw_ops->config_rx_flow_control = xlgmac_config_rx_flow_control; /* For Vlan related config */ hw_ops->enable_rx_vlan_stripping = xlgmac_enable_rx_vlan_stripping; hw_ops->disable_rx_vlan_stripping = xlgmac_disable_rx_vlan_stripping; hw_ops->enable_rx_vlan_filtering = xlgmac_enable_rx_vlan_filtering; hw_ops->disable_rx_vlan_filtering = xlgmac_disable_rx_vlan_filtering; hw_ops->update_vlan_hash_table = xlgmac_update_vlan_hash_table; /* For RX coalescing */ hw_ops->config_rx_coalesce = xlgmac_config_rx_coalesce; hw_ops->config_tx_coalesce = xlgmac_config_tx_coalesce; hw_ops->usec_to_riwt = xlgmac_usec_to_riwt; hw_ops->riwt_to_usec = xlgmac_riwt_to_usec; /* For RX and TX threshold config */ hw_ops->config_rx_threshold = xlgmac_config_rx_threshold; hw_ops->config_tx_threshold = xlgmac_config_tx_threshold; /* For RX and TX Store and Forward Mode config */ hw_ops->config_rsf_mode = xlgmac_config_rsf_mode; hw_ops->config_tsf_mode = xlgmac_config_tsf_mode; /* For TX DMA Operating on Second Frame config */ hw_ops->config_osp_mode = xlgmac_config_osp_mode; /* For RX and TX PBL config */ hw_ops->config_rx_pbl_val = xlgmac_config_rx_pbl_val; hw_ops->get_rx_pbl_val = xlgmac_get_rx_pbl_val; hw_ops->config_tx_pbl_val = xlgmac_config_tx_pbl_val; hw_ops->get_tx_pbl_val = xlgmac_get_tx_pbl_val; hw_ops->config_pblx8 = xlgmac_config_pblx8; /* For MMC statistics support */ hw_ops->tx_mmc_int = xlgmac_tx_mmc_int; hw_ops->rx_mmc_int = xlgmac_rx_mmc_int; hw_ops->read_mmc_stats = xlgmac_read_mmc_stats; /* For Receive Side Scaling */ hw_ops->enable_rss = xlgmac_enable_rss; hw_ops->disable_rss = xlgmac_disable_rss; hw_ops->set_rss_hash_key = xlgmac_set_rss_hash_key; hw_ops->set_rss_lookup_table = xlgmac_set_rss_lookup_table; }
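/* Illustrative sketch only (not part of the upstream driver): one plausible
 * way a caller could consume the hw_ops table filled in by
 * xlgmac_init_hw_ops() above. The helper name xlgmac_example_bring_up(), the
 * assumption that pdata embeds the table as pdata->hw_ops, and the exact call
 * order are all assumptions for illustration; the ops are presumed to take
 * the pdata pointer, matching the init/read_mmc_stats signatures shown here.
 */
static int xlgmac_example_bring_up(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;	/* assumed field */
	int ret;

	/* Populate the function table, then run the one-time hardware init */
	xlgmac_init_hw_ops(hw_ops);
	ret = hw_ops->init(pdata);	/* xlgmac_hw_init(): DMA, MTL, MAC setup */
	if (ret)
		return ret;

	/* Start the DMA engines and take an initial MMC counter snapshot */
	hw_ops->enable_tx(pdata);
	hw_ops->enable_rx(pdata);
	hw_ops->read_mmc_stats(pdata);

	return 0;
}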
linux-master
drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver * * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) * * This program is dual-licensed; you may select either version 2 of * the GNU General Public License ("GPL") or BSD license ("BSD"). * * This Synopsys DWC XLGMAC software driver and associated documentation * (hereinafter the "Software") is an unsupported proprietary work of * Synopsys, Inc. unless otherwise expressly agreed to in writing between * Synopsys and you. The Software IS NOT an item of Licensed Software or a * Licensed Product under any End User Software License Agreement or * Agreement for Licensed Products with Synopsys or any supplement thereto. * Synopsys is a registered trademark of Synopsys, Inc. Other names included * in the SOFTWARE may be the trademarks of their respective owners. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include "dwc-xlgmac.h" #include "dwc-xlgmac-reg.h" static int xlgmac_probe(struct pci_dev *pcidev, const struct pci_device_id *id) { struct device *dev = &pcidev->dev; struct xlgmac_resources res; int i, ret; ret = pcim_enable_device(pcidev); if (ret) { dev_err(dev, "ERROR: failed to enable device\n"); return ret; } for (i = 0; i < PCI_STD_NUM_BARS; i++) { if (pci_resource_len(pcidev, i) == 0) continue; ret = pcim_iomap_regions(pcidev, BIT(i), XLGMAC_DRV_NAME); if (ret) return ret; break; } pci_set_master(pcidev); memset(&res, 0, sizeof(res)); res.irq = pcidev->irq; res.addr = pcim_iomap_table(pcidev)[i]; return xlgmac_drv_probe(&pcidev->dev, &res); } static void xlgmac_remove(struct pci_dev *pcidev) { xlgmac_drv_remove(&pcidev->dev); } static const struct pci_device_id xlgmac_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0x7302) }, { 0 } }; MODULE_DEVICE_TABLE(pci, xlgmac_pci_tbl); static struct pci_driver xlgmac_pci_driver = { .name = XLGMAC_DRV_NAME, .id_table = xlgmac_pci_tbl, .probe = xlgmac_probe, .remove = xlgmac_remove, }; module_pci_driver(xlgmac_pci_driver); MODULE_DESCRIPTION(XLGMAC_DRV_DESC); MODULE_VERSION(XLGMAC_DRV_VERSION); MODULE_AUTHOR("Jie Deng <[email protected]>"); MODULE_LICENSE("Dual BSD/GPL");
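/* Illustrative sketch only (not in the upstream driver): xlgmac_probe() above
 * maps the first standard PCI BAR that reports a non-zero length. The helper
 * below restates just that BAR-selection step on its own; the name
 * xlgmac_example_first_usable_bar() is an assumption for illustration.
 */
static int xlgmac_example_first_usable_bar(struct pci_dev *pcidev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		/* Skip BARs the device does not implement */
		if (pci_resource_len(pcidev, i) == 0)
			continue;
		return i;	/* index of the first usable region */
	}

	return -ENODEV;	/* no memory/IO BAR present */
}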
linux-master
drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
/* [xirc2ps_cs.c wk 03.11.99] (1.40 1999/11/18 00:06:03) * Xircom CreditCard Ethernet Adapter IIps driver * Xircom Realport 10/100 (RE-100) driver * * This driver supports various Xircom CreditCard Ethernet adapters * including the CE2, CE IIps, RE-10, CEM28, CEM33, CE33, CEM56, * CE3-100, CE3B, RE-100, REM10BT, and REM56G-100. * * 2000-09-24 <[email protected]> The Xircom CE3B-100 may not * autodetect the media properly. In this case use the * if_port=1 (for 10BaseT) or if_port=4 (for 100BaseT) options * to force the media type. * * Written originally by Werner Koch based on David Hinds' skeleton of the * PCMCIA driver. * * Copyright (c) 1997,1998 Werner Koch (dd9jn) * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * It is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. * * * ALTERNATIVELY, this driver may be distributed under the terms of * the following license, in which case the provisions of this license * are required INSTEAD OF the GNU General Public License. (This clause * is necessary due to a potential bad interaction between the GPL and * the restrictions contained in a BSD-style copyright.) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, and the entire permission notice in its entirety, * including the disclaimer of warranties. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include <linux/bitops.h> #include <linux/mii.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> #include <asm/io.h> #include <linux/uaccess.h> #ifndef MANFID_COMPAQ #define MANFID_COMPAQ 0x0138 #define MANFID_COMPAQ2 0x0183 /* is this correct? */ #endif #include <pcmcia/ds.h> /* Time in jiffies before concluding Tx hung */ #define TX_TIMEOUT ((400*HZ)/1000) /**************** * Some constants used to access the hardware */ /* Register offsets and value constans */ #define XIRCREG_CR 0 /* Command register (wr) */ enum xirc_cr { TransmitPacket = 0x01, SoftReset = 0x02, EnableIntr = 0x04, ForceIntr = 0x08, ClearTxFIFO = 0x10, ClearRxOvrun = 0x20, RestartTx = 0x40 }; #define XIRCREG_ESR 0 /* Ethernet status register (rd) */ enum xirc_esr { FullPktRcvd = 0x01, /* full packet in receive buffer */ PktRejected = 0x04, /* a packet has been rejected */ TxPktPend = 0x08, /* TX Packet Pending */ IncorPolarity = 0x10, MediaSelect = 0x20 /* set if TP, clear if AUI */ }; #define XIRCREG_PR 1 /* Page Register select */ #define XIRCREG_EDP 4 /* Ethernet Data Port Register */ #define XIRCREG_ISR 6 /* Ethernet Interrupt Status Register */ enum xirc_isr { TxBufOvr = 0x01, /* TX Buffer Overflow */ PktTxed = 0x02, /* Packet Transmitted */ MACIntr = 0x04, /* MAC Interrupt occurred */ TxResGrant = 0x08, /* Tx Reservation Granted */ RxFullPkt = 0x20, /* Rx Full Packet */ RxPktRej = 0x40, /* Rx Packet Rejected */ ForcedIntr= 0x80 /* Forced Interrupt */ }; #define XIRCREG1_IMR0 12 /* Ethernet Interrupt Mask Register (on page 1)*/ #define XIRCREG1_IMR1 13 #define XIRCREG0_TSO 8 /* Transmit Space Open Register (on page 0)*/ #define XIRCREG0_TRS 10 /* Transmit reservation Size Register (page 0)*/ #define XIRCREG0_DO 12 /* Data Offset Register (page 0) (wr) */ #define XIRCREG0_RSR 12 /* Receive Status Register (page 0) (rd) */ enum xirc_rsr { PhyPkt = 0x01, /* set:physical packet, clear: multicast packet */ BrdcstPkt = 0x02, /* set if it is a broadcast packet */ PktTooLong = 0x04, /* set if packet length > 1518 */ AlignErr = 0x10, /* incorrect CRC and last octet not complete */ CRCErr = 0x20, /* incorrect CRC and last octet is complete */ PktRxOk = 0x80 /* received ok */ }; #define XIRCREG0_PTR 13 /* packets transmitted register (rd) */ #define XIRCREG0_RBC 14 /* receive byte count regsister (rd) */ #define XIRCREG1_ECR 14 /* ethernet configurationn register */ enum xirc_ecr { FullDuplex = 0x04, /* enable full duplex mode */ LongTPMode = 0x08, /* adjust for longer lengths of TP cable */ DisablePolCor = 0x10,/* disable auto polarity correction */ DisableLinkPulse = 0x20, /* disable link pulse generation */ DisableAutoTx = 0x40, /* disable auto-transmit */ }; #define XIRCREG2_RBS 8 /* receive buffer start register */ #define XIRCREG2_LED 10 /* LED Configuration register */ /* values for the leds: Bits 2-0 for led 1 * 0 disabled Bits 5-3 for led 2 * 1 collision * 2 noncollision * 3 link_detected * 4 incor_polarity * 5 jabber * 6 auto_assertion * 7 rx_tx_activity */ #define XIRCREG2_MSR 12 /* Mohawk specific register */ #define 
XIRCREG4_GPR0 8 /* General Purpose Register 0 */ #define XIRCREG4_GPR1 9 /* General Purpose Register 1 */ #define XIRCREG2_GPR2 13 /* General Purpose Register 2 (page2!)*/ #define XIRCREG4_BOV 10 /* Bonding Version Register */ #define XIRCREG4_LMA 12 /* Local Memory Address Register */ #define XIRCREG4_LMD 14 /* Local Memory Data Port */ /* MAC register can only by accessed with 8 bit operations */ #define XIRCREG40_CMD0 8 /* Command Register (wr) */ enum xirc_cmd { /* Commands */ Transmit = 0x01, EnableRecv = 0x04, DisableRecv = 0x08, Abort = 0x10, Online = 0x20, IntrAck = 0x40, Offline = 0x80 }; #define XIRCREG5_RHSA0 10 /* Rx Host Start Address */ #define XIRCREG40_RXST0 9 /* Receive Status Register */ #define XIRCREG40_TXST0 11 /* Transmit Status Register 0 */ #define XIRCREG40_TXST1 12 /* Transmit Status Register 10 */ #define XIRCREG40_RMASK0 13 /* Receive Mask Register */ #define XIRCREG40_TMASK0 14 /* Transmit Mask Register 0 */ #define XIRCREG40_TMASK1 15 /* Transmit Mask Register 0 */ #define XIRCREG42_SWC0 8 /* Software Configuration 0 */ #define XIRCREG42_SWC1 9 /* Software Configuration 1 */ #define XIRCREG42_BOC 10 /* Back-Off Configuration */ #define XIRCREG44_TDR0 8 /* Time Domain Reflectometry 0 */ #define XIRCREG44_TDR1 9 /* Time Domain Reflectometry 1 */ #define XIRCREG44_RXBC_LO 10 /* Rx Byte Count 0 (rd) */ #define XIRCREG44_RXBC_HI 11 /* Rx Byte Count 1 (rd) */ #define XIRCREG45_REV 15 /* Revision Register (rd) */ #define XIRCREG50_IA 8 /* Individual Address (8-13) */ static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" }; /* card types */ #define XIR_UNKNOWN 0 /* unknown: not supported */ #define XIR_CE 1 /* (prodid 1) different hardware: not supported */ #define XIR_CE2 2 /* (prodid 2) */ #define XIR_CE3 3 /* (prodid 3) */ #define XIR_CEM 4 /* (prodid 1) different hardware: not supported */ #define XIR_CEM2 5 /* (prodid 2) */ #define XIR_CEM3 6 /* (prodid 3) */ #define XIR_CEM33 7 /* (prodid 4) */ #define XIR_CEM56M 8 /* (prodid 5) */ #define XIR_CEM56 9 /* (prodid 6) */ #define XIR_CM28 10 /* (prodid 3) modem only: not supported here */ #define XIR_CM33 11 /* (prodid 4) modem only: not supported here */ #define XIR_CM56 12 /* (prodid 5) modem only: not supported here */ #define XIR_CG 13 /* (prodid 1) GSM modem only: not supported */ #define XIR_CBE 14 /* (prodid 1) cardbus ethernet: not supported */ /*====================================================================*/ /* Module parameters */ MODULE_DESCRIPTION("Xircom PCMCIA ethernet driver"); MODULE_LICENSE("Dual MPL/GPL"); #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) INT_MODULE_PARM(if_port, 0); INT_MODULE_PARM(full_duplex, 0); INT_MODULE_PARM(do_sound, 1); INT_MODULE_PARM(lockup_hack, 0); /* anti lockup hack */ /*====================================================================*/ /* We do not process more than these number of bytes during one * interrupt. (Of course we receive complete packets, so this is not * an exact value). * Something between 2000..22000; first value gives best interrupt latency, * the second enables the usage of the complete on-chip buffer. We use the * high value as the initial value. 
*/ static unsigned maxrx_bytes = 22000; /* MII management prototypes */ static void mii_idle(unsigned int ioaddr); static void mii_putbit(unsigned int ioaddr, unsigned data); static int mii_getbit(unsigned int ioaddr); static void mii_wbits(unsigned int ioaddr, unsigned data, int len); static unsigned mii_rd(unsigned int ioaddr, u_char phyaddr, u_char phyreg); static void mii_wr(unsigned int ioaddr, u_char phyaddr, u_char phyreg, unsigned data, int len); static int has_ce2_string(struct pcmcia_device * link); static int xirc2ps_config(struct pcmcia_device * link); static void xirc2ps_release(struct pcmcia_device * link); static void xirc2ps_detach(struct pcmcia_device *p_dev); static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id); struct local_info { struct net_device *dev; struct pcmcia_device *p_dev; int card_type; int probe_port; int silicon; /* silicon revision. 0=old CE2, 1=Scipper, 4=Mohawk */ int mohawk; /* a CE3 type card */ int dingo; /* a CEM56 type card */ int new_mii; /* has full 10baseT/100baseT MII */ int modem; /* is a multi function card (i.e with a modem) */ void __iomem *dingo_ccr; /* only used for CEM56 cards */ unsigned last_ptr_value; /* last packets transmitted value */ const char *manf_str; struct work_struct tx_timeout_task; }; /**************** * Some more prototypes */ static netdev_tx_t do_start_xmit(struct sk_buff *skb, struct net_device *dev); static void xirc_tx_timeout(struct net_device *dev, unsigned int txqueue); static void xirc2ps_tx_timeout_task(struct work_struct *work); static void set_addresses(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static int set_card_type(struct pcmcia_device *link); static int do_config(struct net_device *dev, struct ifmap *map); static int do_open(struct net_device *dev); static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static const struct ethtool_ops netdev_ethtool_ops; static void hardreset(struct net_device *dev); static void do_reset(struct net_device *dev, int full); static int init_mii(struct net_device *dev); static void do_powerdown(struct net_device *dev); static int do_stop(struct net_device *dev); /*=============== Helper functions =========================*/ #define SelectPage(pgnr) outb((pgnr), ioaddr + XIRCREG_PR) #define GetByte(reg) ((unsigned)inb(ioaddr + (reg))) #define GetWord(reg) ((unsigned)inw(ioaddr + (reg))) #define PutByte(reg,value) outb((value), ioaddr+(reg)) #define PutWord(reg,value) outw((value), ioaddr+(reg)) /*====== Functions used for debugging =================================*/ #if 0 /* reading regs may change system status */ static void PrintRegisters(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; if (pc_debug > 1) { int i, page; printk(KERN_DEBUG pr_fmt("Register common: ")); for (i = 0; i < 8; i++) pr_cont(" %2.2x", GetByte(i)); pr_cont("\n"); for (page = 0; page <= 8; page++) { printk(KERN_DEBUG pr_fmt("Register page %2x: "), page); SelectPage(page); for (i = 8; i < 16; i++) pr_cont(" %2.2x", GetByte(i)); pr_cont("\n"); } for (page=0x40 ; page <= 0x5f; page++) { if (page == 0x43 || (page >= 0x46 && page <= 0x4f) || (page >= 0x51 && page <=0x5e)) continue; printk(KERN_DEBUG pr_fmt("Register page %2x: "), page); SelectPage(page); for (i = 8; i < 16; i++) pr_cont(" %2.2x", GetByte(i)); pr_cont("\n"); } } } #endif /* 0 */ /*============== MII Management functions ===============*/ /**************** * Turn around for read */ static void mii_idle(unsigned int ioaddr) { PutByte(XIRCREG2_GPR2, 0x04|0); /* drive 
MDCK low */ udelay(1); PutByte(XIRCREG2_GPR2, 0x04|1); /* and drive MDCK high */ udelay(1); } /**************** * Write a bit to MDI/O */ static void mii_putbit(unsigned int ioaddr, unsigned data) { #if 1 if (data) { PutByte(XIRCREG2_GPR2, 0x0c|2|0); /* set MDIO */ udelay(1); PutByte(XIRCREG2_GPR2, 0x0c|2|1); /* and drive MDCK high */ udelay(1); } else { PutByte(XIRCREG2_GPR2, 0x0c|0|0); /* clear MDIO */ udelay(1); PutByte(XIRCREG2_GPR2, 0x0c|0|1); /* and drive MDCK high */ udelay(1); } #else if (data) { PutWord(XIRCREG2_GPR2-1, 0x0e0e); udelay(1); PutWord(XIRCREG2_GPR2-1, 0x0f0f); udelay(1); } else { PutWord(XIRCREG2_GPR2-1, 0x0c0c); udelay(1); PutWord(XIRCREG2_GPR2-1, 0x0d0d); udelay(1); } #endif } /**************** * Get a bit from MDI/O */ static int mii_getbit(unsigned int ioaddr) { unsigned d; PutByte(XIRCREG2_GPR2, 4|0); /* drive MDCK low */ udelay(1); d = GetByte(XIRCREG2_GPR2); /* read MDIO */ PutByte(XIRCREG2_GPR2, 4|1); /* drive MDCK high again */ udelay(1); return d & 0x20; /* read MDIO */ } static void mii_wbits(unsigned int ioaddr, unsigned data, int len) { unsigned m = 1 << (len-1); for (; m; m >>= 1) mii_putbit(ioaddr, data & m); } static unsigned mii_rd(unsigned int ioaddr, u_char phyaddr, u_char phyreg) { int i; unsigned data=0, m; SelectPage(2); for (i=0; i < 32; i++) /* 32 bit preamble */ mii_putbit(ioaddr, 1); mii_wbits(ioaddr, 0x06, 4); /* Start and opcode for read */ mii_wbits(ioaddr, phyaddr, 5); /* PHY address to be accessed */ mii_wbits(ioaddr, phyreg, 5); /* PHY register to read */ mii_idle(ioaddr); /* turn around */ mii_getbit(ioaddr); for (m = 1<<15; m; m >>= 1) if (mii_getbit(ioaddr)) data |= m; mii_idle(ioaddr); return data; } static void mii_wr(unsigned int ioaddr, u_char phyaddr, u_char phyreg, unsigned data, int len) { int i; SelectPage(2); for (i=0; i < 32; i++) /* 32 bit preamble */ mii_putbit(ioaddr, 1); mii_wbits(ioaddr, 0x05, 4); /* Start and opcode for write */ mii_wbits(ioaddr, phyaddr, 5); /* PHY address to be accessed */ mii_wbits(ioaddr, phyreg, 5); /* PHY Register to write */ mii_putbit(ioaddr, 1); /* turn around */ mii_putbit(ioaddr, 0); mii_wbits(ioaddr, data, len); /* And write the data */ mii_idle(ioaddr); } /*============= Main bulk of functions =========================*/ static const struct net_device_ops netdev_ops = { .ndo_open = do_open, .ndo_stop = do_stop, .ndo_start_xmit = do_start_xmit, .ndo_tx_timeout = xirc_tx_timeout, .ndo_set_config = do_config, .ndo_eth_ioctl = do_ioctl, .ndo_set_rx_mode = set_multicast_list, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int xirc2ps_probe(struct pcmcia_device *link) { struct net_device *dev; struct local_info *local; dev_dbg(&link->dev, "attach()\n"); /* Allocate the device structure */ dev = alloc_etherdev(sizeof(struct local_info)); if (!dev) return -ENOMEM; local = netdev_priv(dev); local->dev = dev; local->p_dev = link; link->priv = dev; /* General socket configuration */ link->config_index = 1; /* Fill in card specific entries */ dev->netdev_ops = &netdev_ops; dev->ethtool_ops = &netdev_ethtool_ops; dev->watchdog_timeo = TX_TIMEOUT; INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task); return xirc2ps_config(link); } /* xirc2ps_attach */ static void xirc2ps_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; struct local_info *local = netdev_priv(dev); netif_carrier_off(dev); netif_tx_disable(dev); cancel_work_sync(&local->tx_timeout_task); dev_dbg(&link->dev, "detach\n"); unregister_netdev(dev); xirc2ps_release(link); 
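	/* PCMCIA resources are released above; free_netdev() below drops the
	 * net_device that was allocated with alloc_etherdev() in xirc2ps_probe(). */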
free_netdev(dev); } /* xirc2ps_detach */ /**************** * Detect the type of the card. s is the buffer with the data of tuple 0x20 * Returns: 0 := not supported * mediaid=11 and prodid=47 * Media-Id bits: * Ethernet 0x01 * Tokenring 0x02 * Arcnet 0x04 * Wireless 0x08 * Modem 0x10 * GSM only 0x20 * Prod-Id bits: * Pocket 0x10 * External 0x20 * Creditcard 0x40 * Cardbus 0x80 * */ static int set_card_type(struct pcmcia_device *link) { struct net_device *dev = link->priv; struct local_info *local = netdev_priv(dev); u8 *buf; unsigned int cisrev, mediaid, prodid; size_t len; len = pcmcia_get_tuple(link, CISTPL_MANFID, &buf); if (len < 5) { dev_err(&link->dev, "invalid CIS -- sorry\n"); return 0; } cisrev = buf[2]; mediaid = buf[3]; prodid = buf[4]; dev_dbg(&link->dev, "cisrev=%02x mediaid=%02x prodid=%02x\n", cisrev, mediaid, prodid); local->mohawk = 0; local->dingo = 0; local->modem = 0; local->card_type = XIR_UNKNOWN; if (!(prodid & 0x40)) { pr_notice("Oops: Not a creditcard\n"); return 0; } if (!(mediaid & 0x01)) { pr_notice("Not an Ethernet card\n"); return 0; } if (mediaid & 0x10) { local->modem = 1; switch(prodid & 15) { case 1: local->card_type = XIR_CEM ; break; case 2: local->card_type = XIR_CEM2 ; break; case 3: local->card_type = XIR_CEM3 ; break; case 4: local->card_type = XIR_CEM33 ; break; case 5: local->card_type = XIR_CEM56M; local->mohawk = 1; break; case 6: case 7: /* 7 is the RealPort 10/56 */ local->card_type = XIR_CEM56 ; local->mohawk = 1; local->dingo = 1; break; } } else { switch(prodid & 15) { case 1: local->card_type = has_ce2_string(link)? XIR_CE2 : XIR_CE ; break; case 2: local->card_type = XIR_CE2; break; case 3: local->card_type = XIR_CE3; local->mohawk = 1; break; } } if (local->card_type == XIR_CE || local->card_type == XIR_CEM) { pr_notice("Sorry, this is an old CE card\n"); return 0; } if (local->card_type == XIR_UNKNOWN) pr_notice("unknown card (mediaid=%02x prodid=%02x)\n", mediaid, prodid); return 1; } /**************** * There are some CE2 cards out which claim to be a CE card. * This function looks for a "CE2" in the 3rd version field. * Returns: true if this is a CE2 */ static int has_ce2_string(struct pcmcia_device * p_dev) { if (p_dev->prod_id[2] && strstr(p_dev->prod_id[2], "CE2")) return 1; return 0; } static int xirc2ps_config_modem(struct pcmcia_device *p_dev, void *priv_data) { unsigned int ioaddr; if ((p_dev->resource[0]->start & 0xf) == 8) return -ENODEV; p_dev->resource[0]->end = 16; p_dev->resource[1]->end = 8; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; p_dev->io_lines = 10; p_dev->resource[1]->start = p_dev->resource[0]->start; for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { p_dev->resource[0]->start = ioaddr; if (!pcmcia_request_io(p_dev)) return 0; } return -ENODEV; } static int xirc2ps_config_check(struct pcmcia_device *p_dev, void *priv_data) { int *pass = priv_data; resource_size_t tmp = p_dev->resource[1]->start; tmp += (*pass ? (p_dev->config_index & 0x20 ? -24 : 8) : (p_dev->config_index & 0x20 ? 
8 : -24)); if ((p_dev->resource[0]->start & 0xf) == 8) return -ENODEV; p_dev->resource[0]->end = 18; p_dev->resource[1]->end = 8; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; p_dev->io_lines = 10; p_dev->resource[1]->start = p_dev->resource[0]->start; p_dev->resource[0]->start = tmp; return pcmcia_request_io(p_dev); } static int pcmcia_get_mac_ce(struct pcmcia_device *p_dev, tuple_t *tuple, void *priv) { struct net_device *dev = priv; if (tuple->TupleDataLen != 13) return -EINVAL; if ((tuple->TupleData[0] != 2) || (tuple->TupleData[1] != 1) || (tuple->TupleData[2] != 6)) return -EINVAL; /* another try (James Lehmer's CE2 version 4.1)*/ dev_addr_mod(dev, 2, &tuple->TupleData[2], 4); return 0; }; static int xirc2ps_config(struct pcmcia_device * link) { struct net_device *dev = link->priv; struct local_info *local = netdev_priv(dev); unsigned int ioaddr; int err; u8 *buf; size_t len; local->dingo_ccr = NULL; dev_dbg(&link->dev, "config\n"); /* Is this a valid card */ if (link->has_manf_id == 0) { pr_notice("manfid not found in CIS\n"); goto failure; } switch (link->manf_id) { case MANFID_XIRCOM: local->manf_str = "Xircom"; break; case MANFID_ACCTON: local->manf_str = "Accton"; break; case MANFID_COMPAQ: case MANFID_COMPAQ2: local->manf_str = "Compaq"; break; case MANFID_INTEL: local->manf_str = "Intel"; break; case MANFID_TOSHIBA: local->manf_str = "Toshiba"; break; default: pr_notice("Unknown Card Manufacturer ID: 0x%04x\n", (unsigned)link->manf_id); goto failure; } dev_dbg(&link->dev, "found %s card\n", local->manf_str); if (!set_card_type(link)) { pr_notice("this card is not supported\n"); goto failure; } /* get the ethernet address from the CIS */ err = pcmcia_get_mac_from_cis(link, dev); /* not found: try to get the node-id from tuple 0x89 */ if (err) { len = pcmcia_get_tuple(link, 0x89, &buf); /* data layout looks like tuple 0x22 */ if (buf && len == 8) { if (*buf == CISTPL_FUNCE_LAN_NODE_ID) dev_addr_mod(dev, 2, &buf[2], 4); else err = -1; } kfree(buf); } if (err) err = pcmcia_loop_tuple(link, CISTPL_FUNCE, pcmcia_get_mac_ce, dev); if (err) { pr_notice("node-id not found in CIS\n"); goto failure; } if (local->modem) { int pass; link->config_flags |= CONF_AUTO_SET_IO; if (local->dingo) { /* Take the Modem IO port from the CIS and scan for a free * Ethernet port */ if (!pcmcia_loop_config(link, xirc2ps_config_modem, NULL)) goto port_found; } else { /* We do 2 passes here: The first one uses the regular mapping and * the second tries again, thereby considering that the 32 ports are * mirrored every 32 bytes. Actually we use a mirrored port for * the Mako if (on the first pass) the COR bit 5 is set. */ for (pass=0; pass < 2; pass++) if (!pcmcia_loop_config(link, xirc2ps_config_check, &pass)) goto port_found; /* if special option: * try to configure as Ethernet only. * .... */ } pr_notice("no ports available\n"); } else { link->io_lines = 10; link->resource[0]->end = 16; link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { link->resource[0]->start = ioaddr; if (!(err = pcmcia_request_io(link))) goto port_found; } link->resource[0]->start = 0; /* let CS decide */ if ((err = pcmcia_request_io(link))) goto config_error; } port_found: /**************** * Now allocate an interrupt line. Note that this does not * actually assign a handler to the interrupt. 
*/ if ((err=pcmcia_request_irq(link, xirc2ps_interrupt))) goto config_error; link->config_flags |= CONF_ENABLE_IRQ; if (do_sound) link->config_flags |= CONF_ENABLE_SPKR; if ((err = pcmcia_enable_device(link))) goto config_error; if (local->dingo) { /* Reset the modem's BAR to the correct value * This is necessary because in the RequestConfiguration call, * the base address of the ethernet port (BasePort1) is written * to the BAR registers of the modem. */ err = pcmcia_write_config_byte(link, CISREG_IOBASE_0, (u8) link->resource[1]->start & 0xff); if (err) goto config_error; err = pcmcia_write_config_byte(link, CISREG_IOBASE_1, (link->resource[1]->start >> 8) & 0xff); if (err) goto config_error; /* There is no config entry for the Ethernet part which * is at 0x0800. So we allocate a window into the attribute * memory and write direct to the CIS registers */ link->resource[2]->flags = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_AM | WIN_ENABLE; link->resource[2]->start = link->resource[2]->end = 0; if ((err = pcmcia_request_window(link, link->resource[2], 0))) goto config_error; local->dingo_ccr = ioremap(link->resource[2]->start, 0x1000) + 0x0800; if ((err = pcmcia_map_mem_page(link, link->resource[2], 0))) goto config_error; /* Setup the CCRs; there are no infos in the CIS about the Ethernet * part. */ writeb(0x47, local->dingo_ccr + CISREG_COR); ioaddr = link->resource[0]->start; writeb(ioaddr & 0xff , local->dingo_ccr + CISREG_IOBASE_0); writeb((ioaddr >> 8)&0xff , local->dingo_ccr + CISREG_IOBASE_1); #if 0 { u_char tmp; pr_info("ECOR:"); for (i=0; i < 7; i++) { tmp = readb(local->dingo_ccr + i*2); pr_cont(" %02x", tmp); } pr_cont("\n"); pr_info("DCOR:"); for (i=0; i < 4; i++) { tmp = readb(local->dingo_ccr + 0x20 + i*2); pr_cont(" %02x", tmp); } pr_cont("\n"); pr_info("SCOR:"); for (i=0; i < 10; i++) { tmp = readb(local->dingo_ccr + 0x40 + i*2); pr_cont(" %02x", tmp); } pr_cont("\n"); } #endif writeb(0x01, local->dingo_ccr + 0x20); writeb(0x0c, local->dingo_ccr + 0x22); writeb(0x00, local->dingo_ccr + 0x24); writeb(0x00, local->dingo_ccr + 0x26); writeb(0x00, local->dingo_ccr + 0x28); } /* The if_port symbol can be set when the module is loaded */ local->probe_port=0; if (!if_port) { local->probe_port = dev->if_port = 1; } else if ((if_port >= 1 && if_port <= 2) || (local->mohawk && if_port==4)) dev->if_port = if_port; else pr_notice("invalid if_port requested\n"); /* we can now register the device with the net subsystem */ dev->irq = link->irq; dev->base_addr = link->resource[0]->start; if (local->dingo) do_reset(dev, 1); /* a kludge to make the cem56 work */ SET_NETDEV_DEV(dev, &link->dev); if ((err=register_netdev(dev))) { pr_notice("register_netdev() failed\n"); goto config_error; } /* give some infos about the hardware */ netdev_info(dev, "%s: port %#3lx, irq %d, hwaddr %pM\n", local->manf_str, (u_long)dev->base_addr, (int)dev->irq, dev->dev_addr); return 0; config_error: xirc2ps_release(link); return -ENODEV; failure: return -ENODEV; } /* xirc2ps_config */ static void xirc2ps_release(struct pcmcia_device *link) { dev_dbg(&link->dev, "release\n"); if (link->resource[2]->end) { struct net_device *dev = link->priv; struct local_info *local = netdev_priv(dev); if (local->dingo) iounmap(local->dingo_ccr - 0x0800); } pcmcia_disable_device(link); } /* xirc2ps_release */ /*====================================================================*/ static int xirc2ps_suspend(struct pcmcia_device *link) { struct net_device *dev = link->priv; if (link->open) { netif_device_detach(dev); do_powerdown(dev); 
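		/* The card is now powered down via GPR1; xirc2ps_resume() undoes
		 * this with a full do_reset() before reattaching the interface. */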
} return 0; } static int xirc2ps_resume(struct pcmcia_device *link) { struct net_device *dev = link->priv; if (link->open) { do_reset(dev,1); netif_device_attach(dev); } return 0; } /*====================================================================*/ /**************** * This is the Interrupt service route. */ static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct local_info *lp = netdev_priv(dev); unsigned int ioaddr; u_char saved_page; unsigned bytes_rcvd; unsigned int_status, eth_status, rx_status, tx_status; unsigned rsr, pktlen; ulong start_ticks = jiffies; /* fixme: jiffies rollover every 497 days * is this something to worry about? * -- on a laptop? */ if (!netif_device_present(dev)) return IRQ_HANDLED; ioaddr = dev->base_addr; if (lp->mohawk) { /* must disable the interrupt */ PutByte(XIRCREG_CR, 0); } pr_debug("%s: interrupt %d at %#x.\n", dev->name, irq, ioaddr); saved_page = GetByte(XIRCREG_PR); /* Read the ISR to see whats the cause for the interrupt. * This also clears the interrupt flags on CE2 cards */ int_status = GetByte(XIRCREG_ISR); bytes_rcvd = 0; loop_entry: if (int_status == 0xff) { /* card may be ejected */ pr_debug("%s: interrupt %d for dead card\n", dev->name, irq); goto leave; } eth_status = GetByte(XIRCREG_ESR); SelectPage(0x40); rx_status = GetByte(XIRCREG40_RXST0); PutByte(XIRCREG40_RXST0, (~rx_status & 0xff)); tx_status = GetByte(XIRCREG40_TXST0); tx_status |= GetByte(XIRCREG40_TXST1) << 8; PutByte(XIRCREG40_TXST0, 0); PutByte(XIRCREG40_TXST1, 0); pr_debug("%s: ISR=%#2.2x ESR=%#2.2x RSR=%#2.2x TSR=%#4.4x\n", dev->name, int_status, eth_status, rx_status, tx_status); /***** receive section ******/ SelectPage(0); while (eth_status & FullPktRcvd) { rsr = GetByte(XIRCREG0_RSR); if (bytes_rcvd > maxrx_bytes && (rsr & PktRxOk)) { /* too many bytes received during this int, drop the rest of the * packets */ dev->stats.rx_dropped++; pr_debug("%s: RX drop, too much done\n", dev->name); } else if (rsr & PktRxOk) { struct sk_buff *skb; pktlen = GetWord(XIRCREG0_RBC); bytes_rcvd += pktlen; pr_debug("rsr=%#02x packet_length=%u\n", rsr, pktlen); /* 1 extra so we can use insw */ skb = netdev_alloc_skb(dev, pktlen + 3); if (!skb) { dev->stats.rx_dropped++; } else { /* okay get the packet */ skb_reserve(skb, 2); if (lp->silicon == 0 ) { /* work around a hardware bug */ unsigned rhsa; /* receive start address */ SelectPage(5); rhsa = GetWord(XIRCREG5_RHSA0); SelectPage(0); rhsa += 3; /* skip control infos */ if (rhsa >= 0x8000) rhsa = 0; if (rhsa + pktlen > 0x8000) { unsigned i; u_char *buf = skb_put(skb, pktlen); for (i=0; i < pktlen ; i++, rhsa++) { buf[i] = GetByte(XIRCREG_EDP); if (rhsa == 0x8000) { rhsa = 0; i--; } } } else { insw(ioaddr+XIRCREG_EDP, skb_put(skb, pktlen), (pktlen+1)>>1); } } #if 0 else if (lp->mohawk) { /* To use this 32 bit access we should use * a manual optimized loop * Also the words are swapped, we can get more * performance by using 32 bit access and swapping * the words in a register. Will need this for cardbus * * Note: don't forget to change the ALLOC_SKB to .. 
+3 */ unsigned i; u_long *p = skb_put(skb, pktlen); register u_long a; unsigned int edpreg = ioaddr+XIRCREG_EDP-2; for (i=0; i < len ; i += 4, p++) { a = inl(edpreg); __asm__("rorl $16,%0\n\t" :"=q" (a) : "0" (a)); *p = a; } } #endif else { insw(ioaddr+XIRCREG_EDP, skb_put(skb, pktlen), (pktlen+1)>>1); } skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += pktlen; if (!(rsr & PhyPkt)) dev->stats.multicast++; } } else { /* bad packet */ pr_debug("rsr=%#02x\n", rsr); } if (rsr & PktTooLong) { dev->stats.rx_frame_errors++; pr_debug("%s: Packet too long\n", dev->name); } if (rsr & CRCErr) { dev->stats.rx_crc_errors++; pr_debug("%s: CRC error\n", dev->name); } if (rsr & AlignErr) { dev->stats.rx_fifo_errors++; /* okay ? */ pr_debug("%s: Alignment error\n", dev->name); } /* clear the received/dropped/error packet */ PutWord(XIRCREG0_DO, 0x8000); /* issue cmd: skip_rx_packet */ /* get the new ethernet status */ eth_status = GetByte(XIRCREG_ESR); } if (rx_status & 0x10) { /* Receive overrun */ dev->stats.rx_over_errors++; PutByte(XIRCREG_CR, ClearRxOvrun); pr_debug("receive overrun cleared\n"); } /***** transmit section ******/ if (int_status & PktTxed) { unsigned n, nn; n = lp->last_ptr_value; nn = GetByte(XIRCREG0_PTR); lp->last_ptr_value = nn; if (nn < n) /* rollover */ dev->stats.tx_packets += 256 - n; else if (n == nn) { /* happens sometimes - don't know why */ pr_debug("PTR not changed?\n"); } else dev->stats.tx_packets += lp->last_ptr_value - n; netif_wake_queue(dev); } if (tx_status & 0x0002) { /* Excessive collisions */ pr_debug("tx restarted due to excessive collisions\n"); PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */ } if (tx_status & 0x0040) dev->stats.tx_aborted_errors++; /* recalculate our work chunk so that we limit the duration of this * ISR to about 1/10 of a second. * Calculate only if we received a reasonable amount of bytes. 
*/ if (bytes_rcvd > 1000) { u_long duration = jiffies - start_ticks; if (duration >= HZ/10) { /* if more than about 1/10 second */ maxrx_bytes = (bytes_rcvd * (HZ/10)) / duration; if (maxrx_bytes < 2000) maxrx_bytes = 2000; else if (maxrx_bytes > 22000) maxrx_bytes = 22000; pr_debug("set maxrx=%u (rcvd=%u ticks=%lu)\n", maxrx_bytes, bytes_rcvd, duration); } else if (!duration && maxrx_bytes < 22000) { /* now much faster */ maxrx_bytes += 2000; if (maxrx_bytes > 22000) maxrx_bytes = 22000; pr_debug("set maxrx=%u\n", maxrx_bytes); } } leave: if (lockup_hack) { if (int_status != 0xff && (int_status = GetByte(XIRCREG_ISR)) != 0) goto loop_entry; } SelectPage(saved_page); PutByte(XIRCREG_CR, EnableIntr); /* re-enable interrupts */ /* Instead of dropping packets during a receive, we could * force an interrupt with this command: * PutByte(XIRCREG_CR, EnableIntr|ForceIntr); */ return IRQ_HANDLED; } /* xirc2ps_interrupt */ /*====================================================================*/ static void xirc2ps_tx_timeout_task(struct work_struct *work) { struct local_info *local = container_of(work, struct local_info, tx_timeout_task); struct net_device *dev = local->dev; /* reset the card */ do_reset(dev,1); netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } static void xirc_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct local_info *lp = netdev_priv(dev); dev->stats.tx_errors++; netdev_notice(dev, "transmit timed out\n"); schedule_work(&lp->tx_timeout_task); } static netdev_tx_t do_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct local_info *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; int okay; unsigned freespace; unsigned pktlen = skb->len; pr_debug("do_start_xmit(skb=%p, dev=%p) len=%u\n", skb, dev, pktlen); /* adjust the packet length to min. required * and hope that the buffer is large enough * to provide some random data. * fixme: For Mohawk we can change this by sending * a larger packetlen than we actually have; the chip will * pad this in his buffer with random bytes */ if (pktlen < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; pktlen = ETH_ZLEN; } netif_stop_queue(dev); SelectPage(0); PutWord(XIRCREG0_TRS, (u_short)pktlen+2); freespace = GetWord(XIRCREG0_TSO); okay = freespace & 0x8000; freespace &= 0x7fff; /* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */ okay = pktlen +2 < freespace; pr_debug("%s: avail. tx space=%u%s\n", dev->name, freespace, okay ? 
" (okay)":" (not enough)"); if (!okay) { /* not enough space */ return NETDEV_TX_BUSY; /* upper layer may decide to requeue this packet */ } /* send the packet */ PutWord(XIRCREG_EDP, (u_short)pktlen); outsw(ioaddr+XIRCREG_EDP, skb->data, pktlen>>1); if (pktlen & 1) PutByte(XIRCREG_EDP, skb->data[pktlen-1]); if (lp->mohawk) PutByte(XIRCREG_CR, TransmitPacket|EnableIntr); dev_kfree_skb (skb); dev->stats.tx_bytes += pktlen; netif_start_queue(dev); return NETDEV_TX_OK; } struct set_address_info { int reg_nr; int page_nr; int mohawk; unsigned int ioaddr; }; static void set_address(struct set_address_info *sa_info, const char *addr) { unsigned int ioaddr = sa_info->ioaddr; int i; for (i = 0; i < 6; i++) { if (sa_info->reg_nr > 15) { sa_info->reg_nr = 8; sa_info->page_nr++; SelectPage(sa_info->page_nr); } if (sa_info->mohawk) PutByte(sa_info->reg_nr++, addr[5 - i]); else PutByte(sa_info->reg_nr++, addr[i]); } } /**************** * Set all addresses: This first one is the individual address, * the next 9 addresses are taken from the multicast list and * the rest is filled with the individual address. */ static void set_addresses(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; struct local_info *lp = netdev_priv(dev); struct netdev_hw_addr *ha; struct set_address_info sa_info; int i; /* * Setup the info structure so that by first set_address call it will do * SelectPage with the right page number. Hence these ones here. */ sa_info.reg_nr = 15 + 1; sa_info.page_nr = 0x50 - 1; sa_info.mohawk = lp->mohawk; sa_info.ioaddr = ioaddr; set_address(&sa_info, dev->dev_addr); i = 0; netdev_for_each_mc_addr(ha, dev) { if (i++ == 9) break; set_address(&sa_info, ha->addr); } while (i++ < 9) set_address(&sa_info, dev->dev_addr); SelectPage(0); } /**************** * Set or clear the multicast filter for this adaptor. * We can filter up to 9 addresses, if more are requested we set * multicast promiscuous mode. */ static void set_multicast_list(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; unsigned value; SelectPage(0x42); value = GetByte(XIRCREG42_SWC1) & 0xC0; if (dev->flags & IFF_PROMISC) { /* snoop */ PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */ } else if (netdev_mc_count(dev) > 9 || (dev->flags & IFF_ALLMULTI)) { PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */ } else if (!netdev_mc_empty(dev)) { /* the chip can filter 9 addresses perfectly */ PutByte(XIRCREG42_SWC1, value | 0x01); SelectPage(0x40); PutByte(XIRCREG40_CMD0, Offline); set_addresses(dev); SelectPage(0x40); PutByte(XIRCREG40_CMD0, EnableRecv | Online); } else { /* standard usage */ PutByte(XIRCREG42_SWC1, value | 0x00); } SelectPage(0); } static int do_config(struct net_device *dev, struct ifmap *map) { struct local_info *local = netdev_priv(dev); pr_debug("do_config(%p)\n", dev); if (map->port != 255 && map->port != dev->if_port) { if (map->port > 4) return -EINVAL; if (!map->port) { local->probe_port = 1; dev->if_port = 1; } else { local->probe_port = 0; dev->if_port = map->port; } netdev_info(dev, "switching to %s port\n", if_names[dev->if_port]); do_reset(dev,1); /* not the fine way :-) */ } return 0; } /**************** * Open the driver */ static int do_open(struct net_device *dev) { struct local_info *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; dev_dbg(&link->dev, "do_open(%p)\n", dev); /* Check that the PCMCIA card is still here. */ /* Physical device present signature. 
*/ if (!pcmcia_dev_present(link)) return -ENODEV; /* okay */ link->open++; netif_start_queue(dev); do_reset(dev,1); return 0; } static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strscpy(info->driver, "xirc2ps_cs", sizeof(info->driver)); snprintf(info->bus_info, sizeof(info->bus_info), "PCMCIA 0x%lx", dev->base_addr); } static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, }; static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct local_info *local = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; struct mii_ioctl_data *data = if_mii(rq); pr_debug("%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n", dev->name, rq->ifr_ifrn.ifrn_name, cmd, data->phy_id, data->reg_num, data->val_in, data->val_out); if (!local->mohawk) return -EOPNOTSUPP; switch(cmd) { case SIOCGMIIPHY: /* Get the address of the PHY in use. */ data->phy_id = 0; /* we have only this address */ fallthrough; case SIOCGMIIREG: /* Read the specified MII register. */ data->val_out = mii_rd(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f); break; case SIOCSMIIREG: /* Write the specified MII register */ mii_wr(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in, 16); break; default: return -EOPNOTSUPP; } return 0; } static void hardreset(struct net_device *dev) { struct local_info *local = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; SelectPage(4); udelay(1); PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */ msleep(40); /* wait 40 msec */ if (local->mohawk) PutByte(XIRCREG4_GPR1, 1); /* set bit 0: power up */ else PutByte(XIRCREG4_GPR1, 1 | 4); /* set bit 0: power up, bit 2: AIC */ msleep(20); /* wait 20 msec */ } static void do_reset(struct net_device *dev, int full) { struct local_info *local = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; unsigned value; pr_debug("%s: do_reset(%p,%d)\n", dev->name, dev, full); hardreset(dev); PutByte(XIRCREG_CR, SoftReset); /* set */ msleep(20); /* wait 20 msec */ PutByte(XIRCREG_CR, 0); /* clear */ msleep(40); /* wait 40 msec */ if (local->mohawk) { SelectPage(4); /* set pin GP1 and GP2 to output (0x0c) * set GP1 to low to power up the ML6692 (0x00) * set GP2 to high to power up the 10Mhz chip (0x02) */ PutByte(XIRCREG4_GPR0, 0x0e); } /* give the circuits some time to power up */ msleep(500); /* about 500ms */ local->last_ptr_value = 0; local->silicon = local->mohawk ? (GetByte(XIRCREG4_BOV) & 0x70) >> 4 : (GetByte(XIRCREG4_BOV) & 0x30) >> 4; if (local->probe_port) { if (!local->mohawk) { SelectPage(4); PutByte(XIRCREG4_GPR0, 4); local->probe_port = 0; } } else if (dev->if_port == 2) { /* enable 10Base2 */ SelectPage(0x42); PutByte(XIRCREG42_SWC1, 0xC0); } else { /* enable 10BaseT */ SelectPage(0x42); PutByte(XIRCREG42_SWC1, 0x80); } msleep(40); /* wait 40 msec to let it complete */ #if 0 { SelectPage(0); value = GetByte(XIRCREG_ESR); /* read the ESR */ pr_debug("%s: ESR is: %#02x\n", dev->name, value); } #endif /* setup the ECR */ SelectPage(1); PutByte(XIRCREG1_IMR0, 0xff); /* allow all ints */ PutByte(XIRCREG1_IMR1, 1 ); /* and Set TxUnderrunDetect */ value = GetByte(XIRCREG1_ECR); #if 0 if (local->mohawk) value |= DisableLinkPulse; PutByte(XIRCREG1_ECR, value); #endif pr_debug("%s: ECR is: %#02x\n", dev->name, value); SelectPage(0x42); PutByte(XIRCREG42_SWC0, 0x20); /* disable source insertion */ if (local->silicon != 1) { /* set the local memory dividing line. 
* The comments in the sample code say that this is only * settable with the scipper version 2 which is revision 0. * Always for CE3 cards */ SelectPage(2); PutWord(XIRCREG2_RBS, 0x2000); } if (full) set_addresses(dev); /* Hardware workaround: * The receive byte pointer after reset is off by 1 so we need * to move the offset pointer back to 0. */ SelectPage(0); PutWord(XIRCREG0_DO, 0x2000); /* change offset command, off=0 */ /* setup MAC IMRs and clear status registers */ SelectPage(0x40); /* Bit 7 ... bit 0 */ PutByte(XIRCREG40_RMASK0, 0xff); /* ROK, RAB, rsv, RO, CRC, AE, PTL, MP */ PutByte(XIRCREG40_TMASK0, 0xff); /* TOK, TAB, SQE, LL, TU, JAB, EXC, CRS */ PutByte(XIRCREG40_TMASK1, 0xb0); /* rsv, rsv, PTD, EXT, rsv,rsv,rsv, rsv*/ PutByte(XIRCREG40_RXST0, 0x00); /* ROK, RAB, REN, RO, CRC, AE, PTL, MP */ PutByte(XIRCREG40_TXST0, 0x00); /* TOK, TAB, SQE, LL, TU, JAB, EXC, CRS */ PutByte(XIRCREG40_TXST1, 0x00); /* TEN, rsv, PTD, EXT, retry_counter:4 */ if (full && local->mohawk && init_mii(dev)) { if (dev->if_port == 4 || local->dingo || local->new_mii) { netdev_info(dev, "MII selected\n"); SelectPage(2); PutByte(XIRCREG2_MSR, GetByte(XIRCREG2_MSR) | 0x08); msleep(20); } else { netdev_info(dev, "MII detected; using 10mbs\n"); SelectPage(0x42); if (dev->if_port == 2) /* enable 10Base2 */ PutByte(XIRCREG42_SWC1, 0xC0); else /* enable 10BaseT */ PutByte(XIRCREG42_SWC1, 0x80); msleep(40); /* wait 40 msec to let it complete */ } if (full_duplex) PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR | FullDuplex)); } else { /* No MII */ SelectPage(0); value = GetByte(XIRCREG_ESR); /* read the ESR */ dev->if_port = (value & MediaSelect) ? 1 : 2; } /* configure the LEDs */ SelectPage(2); if (dev->if_port == 1 || dev->if_port == 4) /* TP: Link and Activity */ PutByte(XIRCREG2_LED, 0x3b); else /* Coax: Not-Collision and Activity */ PutByte(XIRCREG2_LED, 0x3a); if (local->dingo) PutByte(0x0b, 0x04); /* 100 Mbit LED */ /* enable receiver and put the mac online */ if (full) { set_multicast_list(dev); SelectPage(0x40); PutByte(XIRCREG40_CMD0, EnableRecv | Online); } /* setup Ethernet IMR and enable interrupts */ SelectPage(1); PutByte(XIRCREG1_IMR0, 0xff); udelay(1); SelectPage(0); PutByte(XIRCREG_CR, EnableIntr); if (local->modem && !local->dingo) { /* do some magic */ if (!(GetByte(0x10) & 0x01)) PutByte(0x10, 0x11); /* unmask master-int bit */ } if (full) netdev_info(dev, "media %s, silicon revision %d\n", if_names[dev->if_port], local->silicon); /* We should switch back to page 0 to avoid a bug in revision 0 * where regs with offset below 8 can't be read after an access * to the MAC registers */ SelectPage(0); } /**************** * Initialize the Media-Independent-Interface * Returns: True if we have a good MII */ static int init_mii(struct net_device *dev) { struct local_info *local = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; unsigned control, status, linkpartner; int i; if (if_port == 4 || if_port == 1) { /* force 100BaseT or 10BaseT */ dev->if_port = if_port; local->probe_port = 0; return 1; } status = mii_rd(ioaddr, 0, 1); if ((status & 0xff00) != 0x7800) return 0; /* No MII */ local->new_mii = (mii_rd(ioaddr, 0, 2) != 0xffff); if (local->probe_port) control = 0x1000; /* auto neg */ else if (dev->if_port == 4) control = 0x2000; /* no auto neg, 100mbs mode */ else control = 0x0000; /* no auto neg, 10mbs mode */ mii_wr(ioaddr, 0, 0, control, 16); udelay(100); control = mii_rd(ioaddr, 0, 0); if (control & 0x0400) { netdev_notice(dev, "can't take PHY out of isolation mode\n"); local->probe_port = 
0; return 0; } if (local->probe_port) { /* according to the DP83840A specs the auto negotiation process * may take up to 3.5 sec, so we use this also for our ML6692 * Fixme: Better to use a timer here! */ for (i=0; i < 35; i++) { msleep(100); /* wait 100 msec */ status = mii_rd(ioaddr, 0, 1); if ((status & 0x0020) && (status & 0x0004)) break; } if (!(status & 0x0020)) { netdev_info(dev, "autonegotiation failed; using 10mbs\n"); if (!local->new_mii) { control = 0x0000; mii_wr(ioaddr, 0, 0, control, 16); udelay(100); SelectPage(0); dev->if_port = (GetByte(XIRCREG_ESR) & MediaSelect) ? 1 : 2; } } else { linkpartner = mii_rd(ioaddr, 0, 5); netdev_info(dev, "MII link partner: %04x\n", linkpartner); if (linkpartner & 0x0080) { dev->if_port = 4; } else dev->if_port = 1; } } return 1; } static void do_powerdown(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; pr_debug("do_powerdown(%p)\n", dev); SelectPage(4); PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */ SelectPage(0); } static int do_stop(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; struct local_info *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; dev_dbg(&link->dev, "do_stop(%p)\n", dev); if (!link) return -ENODEV; netif_stop_queue(dev); SelectPage(0); PutByte(XIRCREG_CR, 0); /* disable interrupts */ SelectPage(0x01); PutByte(XIRCREG1_IMR0, 0x00); /* forbid all ints */ SelectPage(4); PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */ SelectPage(0); link->open--; return 0; } static const struct pcmcia_device_id xirc2ps_ids[] = { PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0089, 0x110a), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0138, 0x110a), PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "CEM28", 0x2e3ee845, 0x0ea978ea), PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "CEM33", 0x2e3ee845, 0x80609023), PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "CEM56", 0x2e3ee845, 0xa650c32a), PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "REM10", 0x2e3ee845, 0x76df1d29), PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "XEM5600", 0x2e3ee845, 0xf1403719), PCMCIA_PFC_DEVICE_PROD_ID12(0, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x010a), PCMCIA_DEVICE_PROD_ID13("Toshiba Information Systems", "TPCENET", 0x1b3b94fe, 0xf381c1a2), PCMCIA_DEVICE_PROD_ID13("Xircom", "CE3-10/100", 0x2e3ee845, 0x0ec0ac37), PCMCIA_DEVICE_PROD_ID13("Xircom", "PS-CE2-10", 0x2e3ee845, 0x947d9073), PCMCIA_DEVICE_PROD_ID13("Xircom", "R2E-100BTX", 0x2e3ee845, 0x2464a6e3), PCMCIA_DEVICE_PROD_ID13("Xircom", "RE-10", 0x2e3ee845, 0x3e08d609), PCMCIA_DEVICE_PROD_ID13("Xircom", "XE2000", 0x2e3ee845, 0xf7188e46), PCMCIA_DEVICE_PROD_ID12("Compaq", "Ethernet LAN Card", 0x54f7c49c, 0x9fd2f0a2), PCMCIA_DEVICE_PROD_ID12("Compaq", "Netelligent 10/100 PC Card", 0x54f7c49c, 0xefe96769), PCMCIA_DEVICE_PROD_ID12("Intel", "EtherExpress(TM) PRO/100 PC Card Mobile Adapter16", 0x816cc815, 0x174397db), PCMCIA_DEVICE_PROD_ID12("Toshiba", "10/100 Ethernet PC Card", 0x44a09d9c, 0xb44deecf), /* also matches CFE-10 cards! 
*/ /* PCMCIA_DEVICE_MANF_CARD(0x0105, 0x010a), */ PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, xirc2ps_ids); static struct pcmcia_driver xirc2ps_cs_driver = { .owner = THIS_MODULE, .name = "xirc2ps_cs", .probe = xirc2ps_probe, .remove = xirc2ps_detach, .id_table = xirc2ps_ids, .suspend = xirc2ps_suspend, .resume = xirc2ps_resume, }; module_pcmcia_driver(xirc2ps_cs_driver); #ifndef MODULE static int __init setup_xirc2ps_cs(char *str) { /* if_port, full_duplex, do_sound, lockup_hack */ int ints[10] = { -1 }; str = get_options(str, ARRAY_SIZE(ints), ints); #define MAYBE_SET(X,Y) if (ints[0] >= Y && ints[Y] != -1) { X = ints[Y]; } MAYBE_SET(if_port, 3); MAYBE_SET(full_duplex, 4); MAYBE_SET(do_sound, 5); MAYBE_SET(lockup_hack, 6); #undef MAYBE_SET return 1; } __setup("xirc2ps_cs=", setup_xirc2ps_cs); #endif
linux-master
drivers/net/ethernet/xircom/xirc2ps_cs.c
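The mii_rd()/mii_wr() helpers in xirc2ps_cs.c above bit-bang IEEE 802.3 clause-22 management frames through the GPR2 general-purpose register (bit 0 apparently clocking MDC, bit 1 driving MDIO). The standalone sketch below reproduces only the read-frame sequence with the register pokes replaced by callbacks; all names (mdio_ops, mdio_read, fake_phy) and the canned register value are assumptions made for illustration, not driver API.

#include <stdio.h>

/* Hypothetical callbacks standing in for the GPR2 register pokes done by
 * mii_putbit()/mii_getbit(); they are illustrative, not driver API. */
struct mdio_ops {
	void (*putbit)(void *ctx, int bit);	/* drive MDIO, pulse MDC */
	int  (*getbit)(void *ctx);		/* pulse MDC, sample MDIO */
	void *ctx;
};

static void mdio_putbits(const struct mdio_ops *ops, unsigned val, int len)
{
	for (unsigned m = 1u << (len - 1); m; m >>= 1)
		ops->putbit(ops->ctx, !!(val & m));
}

/* Clause-22 read frame as mii_rd() clocks it out: 32 preamble ones,
 * start + read opcode (0110), 5-bit PHY address, 5-bit register number,
 * one turnaround clock, then 16 data bits sampled MSB first. */
static unsigned mdio_read(const struct mdio_ops *ops, unsigned phy, unsigned reg)
{
	unsigned data = 0, m;
	int i;

	for (i = 0; i < 32; i++)
		ops->putbit(ops->ctx, 1);
	mdio_putbits(ops, 0x06, 4);
	mdio_putbits(ops, phy & 0x1f, 5);
	mdio_putbits(ops, reg & 0x1f, 5);
	ops->getbit(ops->ctx);			/* turnaround */
	for (m = 1u << 15; m; m >>= 1)
		if (ops->getbit(ops->ctx))
			data |= m;
	return data;
}

/* Fake PHY for the demo: ignores writes and shifts out a canned value. */
struct fake_phy { unsigned value; int bit; };

static void fake_putbit(void *ctx, int bit) { (void)ctx; (void)bit; }

static int fake_getbit(void *ctx)
{
	struct fake_phy *p = ctx;

	if (p->bit < 0) {			/* turnaround clock */
		p->bit++;
		return 0;
	}
	return (p->value >> (15 - p->bit++)) & 1;
}

int main(void)
{
	struct fake_phy phy = { 0x7809, -1 };	/* canned demo value */
	struct mdio_ops ops = { fake_putbit, fake_getbit, &phy };

	printf("register read back: 0x%04x\n", mdio_read(&ops, 0, 1));
	return 0;
}

Run standalone, this prints the canned value back, which is all the demo is meant to show: the bit ordering of the frame matches what the driver shifts through GPR2.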
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/acorn/net/ether3.c * * Copyright (C) 1995-2000 Russell King * * SEEQ nq8005 ethernet driver for Acorn/ANT Ether3 card * for Acorn machines * * By Russell King, with some suggestions from [email protected] * * Changelog: * 1.04 RMK 29/02/1996 Won't pass packets that are from our ethernet * address up to the higher levels - they're * silently ignored. I/F can now be put into * multicast mode. Receiver routine optimised. * 1.05 RMK 30/02/1996 Now claims interrupt at open when part of * the kernel rather than when a module. * 1.06 RMK 02/03/1996 Various code cleanups * 1.07 RMK 13/10/1996 Optimised interrupt routine and transmit * routines. * 1.08 RMK 14/10/1996 Fixed problem with too many packets, * prevented the kernel message about dropped * packets appearing too many times a second. * Now does not disable all IRQs, only the IRQ * used by this card. * 1.09 RMK 10/11/1996 Only enables TX irq when buffer space is low, * but we still service the TX queue if we get a * RX interrupt. * 1.10 RMK 15/07/1997 Fixed autoprobing of NQ8004. * 1.11 RMK 16/11/1997 Fixed autoprobing of NQ8005A. * 1.12 RMK 31/12/1997 Removed reference to dev_tint for Linux 2.1. * RMK 27/06/1998 Changed asm/delay.h to linux/delay.h. * 1.13 RMK 29/06/1998 Fixed problem with transmission of packets. * Chip seems to have a bug in, whereby if the * packet starts two bytes from the end of the * buffer, it corrupts the receiver chain, and * never updates the transmit status correctly. * 1.14 RMK 07/01/1998 Added initial code for ETHERB addressing. * 1.15 RMK 30/04/1999 More fixes to the transmit routine for buggy * hardware. * 1.16 RMK 10/02/2000 Updated for 2.3.43 * 1.17 RMK 13/05/2000 Updated for 2.3.99-pre8 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/device.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/bitops.h> #include <asm/ecard.h> #include <asm/io.h> static char version[] = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n"; #include "ether3.h" static unsigned int net_debug = NET_DEBUG; static void ether3_setmulticastlist(struct net_device *dev); static int ether3_rx(struct net_device *dev, unsigned int maxcnt); static void ether3_tx(struct net_device *dev); static int ether3_open (struct net_device *dev); static netdev_tx_t ether3_sendpacket(struct sk_buff *skb, struct net_device *dev); static irqreturn_t ether3_interrupt (int irq, void *dev_id); static int ether3_close (struct net_device *dev); static void ether3_setmulticastlist (struct net_device *dev); static void ether3_timeout(struct net_device *dev, unsigned int txqueue); #define BUS_16 2 #define BUS_8 1 #define BUS_UNKNOWN 0 /* --------------------------------------------------------------------------- */ typedef enum { buffer_write, buffer_read } buffer_rw_t; /* * ether3 read/write. Slow things down a bit... * The SEEQ8005 doesn't like us writing to its registers * too quickly. 
*/ static inline void ether3_outb(int v, void __iomem *r) { writeb(v, r); udelay(1); } static inline void ether3_outw(int v, void __iomem *r) { writew(v, r); udelay(1); } #define ether3_inb(r) ({ unsigned int __v = readb((r)); udelay(1); __v; }) #define ether3_inw(r) ({ unsigned int __v = readw((r)); udelay(1); __v; }) static int ether3_setbuffer(struct net_device *dev, buffer_rw_t read, int start) { int timeout = 1000; ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1); ether3_outw(priv(dev)->regs.command | CMD_FIFOWRITE, REG_COMMAND); while ((ether3_inw(REG_STATUS) & STAT_FIFOEMPTY) == 0) { if (!timeout--) { printk("%s: setbuffer broken\n", dev->name); priv(dev)->broken = 1; return 1; } udelay(1); } if (read == buffer_read) { ether3_outw(start, REG_DMAADDR); ether3_outw(priv(dev)->regs.command | CMD_FIFOREAD, REG_COMMAND); } else { ether3_outw(priv(dev)->regs.command | CMD_FIFOWRITE, REG_COMMAND); ether3_outw(start, REG_DMAADDR); } return 0; } /* * write data to the buffer memory */ #define ether3_writebuffer(dev,data,length) \ writesw(REG_BUFWIN, (data), (length) >> 1) #define ether3_writeword(dev,data) \ writew((data), REG_BUFWIN) #define ether3_writelong(dev,data) { \ void __iomem *reg_bufwin = REG_BUFWIN; \ writew((data), reg_bufwin); \ writew((data) >> 16, reg_bufwin); \ } /* * read data from the buffer memory */ #define ether3_readbuffer(dev,data,length) \ readsw(REG_BUFWIN, (data), (length) >> 1) #define ether3_readword(dev) \ readw(REG_BUFWIN) #define ether3_readlong(dev) \ readw(REG_BUFWIN) | (readw(REG_BUFWIN) << 16) /* * Switch LED off... */ static void ether3_ledoff(struct timer_list *t) { struct dev_priv *private = from_timer(private, t, timer); struct net_device *dev = private->dev; ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2); } /* * switch LED on... */ static inline void ether3_ledon(struct net_device *dev) { del_timer(&priv(dev)->timer); priv(dev)->timer.expires = jiffies + HZ / 50; /* leave on for 1/50th second */ add_timer(&priv(dev)->timer); if (priv(dev)->regs.config2 & CFG2_CTRLO) ether3_outw(priv(dev)->regs.config2 &= ~CFG2_CTRLO, REG_CONFIG2); } /* * Read the ethernet address string from the on board rom. * This is an ascii string!!! 
*/ static int ether3_addr(char *addr, struct expansion_card *ec) { struct in_chunk_dir cd; char *s; if (ecard_readchunk(&cd, ec, 0xf5, 0) && (s = strchr(cd.d.string, '('))) { int i; for (i = 0; i<6; i++) { addr[i] = simple_strtoul(s + 1, &s, 0x10); if (*s != (i==5?')' : ':' )) break; } if (i == 6) return 0; } /* I wonder if we should even let the user continue in this case * - no, it would be better to disable the device */ printk(KERN_ERR "ether3: Couldn't read a valid MAC address from card.\n"); return -ENODEV; } /* --------------------------------------------------------------------------- */ static int ether3_ramtest(struct net_device *dev, unsigned char byte) { unsigned char *buffer = kmalloc(RX_END, GFP_KERNEL); int i,ret = 0; int max_errors = 4; int bad = -1; if (!buffer) return 1; memset(buffer, byte, RX_END); ether3_setbuffer(dev, buffer_write, 0); ether3_writebuffer(dev, buffer, TX_END); ether3_setbuffer(dev, buffer_write, RX_START); ether3_writebuffer(dev, buffer + RX_START, RX_LEN); memset(buffer, byte ^ 0xff, RX_END); ether3_setbuffer(dev, buffer_read, 0); ether3_readbuffer(dev, buffer, TX_END); ether3_setbuffer(dev, buffer_read, RX_START); ether3_readbuffer(dev, buffer + RX_START, RX_LEN); for (i = 0; i < RX_END; i++) { if (buffer[i] != byte) { if (max_errors > 0 && bad != buffer[i]) { printk("%s: RAM failed with (%02X instead of %02X) at 0x%04X", dev->name, buffer[i], byte, i); ret = 2; max_errors--; bad = i; } } else { if (bad != -1) { if (bad != i - 1) printk(" - 0x%04X\n", i - 1); printk("\n"); bad = -1; } } } if (bad != -1) printk(" - 0xffff\n"); kfree(buffer); return ret; } /* ------------------------------------------------------------------------------- */ static int ether3_init_2(struct net_device *dev) { int i; priv(dev)->regs.config1 = CFG1_RECVCOMPSTAT0|CFG1_DMABURST8; priv(dev)->regs.config2 = CFG2_CTRLO|CFG2_RECVCRC|CFG2_ERRENCRC; priv(dev)->regs.command = 0; /* * Set up our hardware address */ ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1); for (i = 0; i < 6; i++) ether3_outb(dev->dev_addr[i], REG_BUFWIN); if (dev->flags & IFF_PROMISC) priv(dev)->regs.config1 |= CFG1_RECVPROMISC; else if (dev->flags & IFF_MULTICAST) priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI; else priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD; /* * There is a problem with the NQ8005 in that it occasionally loses the * last two bytes. To get round this problem, we receive the CRC as * well. That way, if we do lose the last two, then it doesn't matter. 
*/ ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1); ether3_outw((TX_END>>8) - 1, REG_BUFWIN); ether3_outw(priv(dev)->rx_head, REG_RECVPTR); ether3_outw(0, REG_TRANSMITPTR); ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND); ether3_outw(priv(dev)->regs.config2, REG_CONFIG2); ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1); ether3_outw(priv(dev)->regs.command, REG_COMMAND); i = ether3_ramtest(dev, 0x5A); if(i) return i; i = ether3_ramtest(dev, 0x1E); if(i) return i; ether3_setbuffer(dev, buffer_write, 0); ether3_writelong(dev, 0); return 0; } static void ether3_init_for_open(struct net_device *dev) { int i; /* Reset the chip */ ether3_outw(CFG2_RESET, REG_CONFIG2); udelay(4); priv(dev)->regs.command = 0; ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND); while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON)) barrier(); ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1); for (i = 0; i < 6; i++) ether3_outb(dev->dev_addr[i], REG_BUFWIN); priv(dev)->tx_head = 0; priv(dev)->tx_tail = 0; priv(dev)->regs.config2 |= CFG2_CTRLO; priv(dev)->rx_head = RX_START; ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1); ether3_outw((TX_END>>8) - 1, REG_BUFWIN); ether3_outw(priv(dev)->rx_head, REG_RECVPTR); ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND); ether3_outw(0, REG_TRANSMITPTR); ether3_outw(priv(dev)->regs.config2, REG_CONFIG2); ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1); ether3_setbuffer(dev, buffer_write, 0); ether3_writelong(dev, 0); priv(dev)->regs.command = CMD_ENINTRX | CMD_ENINTTX; ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND); } static inline int ether3_probe_bus_8(struct net_device *dev, int val) { int write_low, write_high, read_low, read_high; write_low = val & 255; write_high = val >> 8; printk(KERN_DEBUG "ether3_probe: write8 [%02X:%02X]", write_high, write_low); ether3_outb(write_low, REG_RECVPTR); ether3_outb(write_high, REG_RECVPTR + 4); read_low = ether3_inb(REG_RECVPTR); read_high = ether3_inb(REG_RECVPTR + 4); printk(", read8 [%02X:%02X]\n", read_high, read_low); return read_low == write_low && read_high == write_high; } static inline int ether3_probe_bus_16(struct net_device *dev, int val) { int read_val; ether3_outw(val, REG_RECVPTR); read_val = ether3_inw(REG_RECVPTR); printk(KERN_DEBUG "ether3_probe: write16 [%04X], read16 [%04X]\n", val, read_val); return read_val == val; } /* * Open/initialize the board. This is called (in the current kernel) * sometime after booting when the 'ifconfig' program is run. * * This routine should set everything up anew at each open, even * registers that "should" only need to be set once at boot, so that * there is non-reboot way to recover if something goes wrong. */ static int ether3_open(struct net_device *dev) { if (request_irq(dev->irq, ether3_interrupt, 0, "ether3", dev)) return -EAGAIN; ether3_init_for_open(dev); netif_start_queue(dev); return 0; } /* * The inverse routine to ether3_open(). */ static int ether3_close(struct net_device *dev) { netif_stop_queue(dev); disable_irq(dev->irq); ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND); priv(dev)->regs.command = 0; while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON)) barrier(); ether3_outb(0x80, REG_CONFIG2 + 4); ether3_outw(0, REG_COMMAND); free_irq(dev->irq, dev); return 0; } /* * Set or clear promiscuous/multicast mode filter for this adaptor. * * We don't attempt any packet filtering. 
The card may have a SEEQ 8004 * in which does not have the other ethernet address registers present... */ static void ether3_setmulticastlist(struct net_device *dev) { priv(dev)->regs.config1 &= ~CFG1_RECVPROMISC; if (dev->flags & IFF_PROMISC) { /* promiscuous mode */ priv(dev)->regs.config1 |= CFG1_RECVPROMISC; } else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI; } else priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD; ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1); } static void ether3_timeout(struct net_device *dev, unsigned int txqueue) { unsigned long flags; del_timer(&priv(dev)->timer); local_irq_save(flags); printk(KERN_ERR "%s: transmit timed out, network cable problem?\n", dev->name); printk(KERN_ERR "%s: state: { status=%04X cfg1=%04X cfg2=%04X }\n", dev->name, ether3_inw(REG_STATUS), ether3_inw(REG_CONFIG1), ether3_inw(REG_CONFIG2)); printk(KERN_ERR "%s: { rpr=%04X rea=%04X tpr=%04X }\n", dev->name, ether3_inw(REG_RECVPTR), ether3_inw(REG_RECVEND), ether3_inw(REG_TRANSMITPTR)); printk(KERN_ERR "%s: tx head=%X tx tail=%X\n", dev->name, priv(dev)->tx_head, priv(dev)->tx_tail); ether3_setbuffer(dev, buffer_read, priv(dev)->tx_tail); printk(KERN_ERR "%s: packet status = %08X\n", dev->name, ether3_readlong(dev)); local_irq_restore(flags); priv(dev)->regs.config2 |= CFG2_CTRLO; dev->stats.tx_errors += 1; ether3_outw(priv(dev)->regs.config2, REG_CONFIG2); priv(dev)->tx_head = priv(dev)->tx_tail = 0; netif_wake_queue(dev); } /* * Transmit a packet */ static netdev_tx_t ether3_sendpacket(struct sk_buff *skb, struct net_device *dev) { unsigned long flags; unsigned int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; unsigned int ptr, next_ptr; if (priv(dev)->broken) { dev_kfree_skb(skb); dev->stats.tx_dropped++; netif_start_queue(dev); return NETDEV_TX_OK; } length = (length + 1) & ~1; if (length != skb->len) { if (skb_padto(skb, length)) goto out; } next_ptr = (priv(dev)->tx_head + 1) & 15; local_irq_save(flags); if (priv(dev)->tx_tail == next_ptr) { local_irq_restore(flags); return NETDEV_TX_BUSY; /* unable to queue */ } ptr = 0x600 * priv(dev)->tx_head; priv(dev)->tx_head = next_ptr; next_ptr *= 0x600; #define TXHDR_FLAGS (TXHDR_TRANSMIT|TXHDR_CHAINCONTINUE|TXHDR_DATAFOLLOWS|TXHDR_ENSUCCESS) ether3_setbuffer(dev, buffer_write, next_ptr); ether3_writelong(dev, 0); ether3_setbuffer(dev, buffer_write, ptr); ether3_writelong(dev, 0); ether3_writebuffer(dev, skb->data, length); ether3_writeword(dev, htons(next_ptr)); ether3_writeword(dev, TXHDR_CHAINCONTINUE >> 16); ether3_setbuffer(dev, buffer_write, ptr); ether3_writeword(dev, htons((ptr + length + 4))); ether3_writeword(dev, TXHDR_FLAGS >> 16); ether3_ledon(dev); if (!(ether3_inw(REG_STATUS) & STAT_TXON)) { ether3_outw(ptr, REG_TRANSMITPTR); ether3_outw(priv(dev)->regs.command | CMD_TXON, REG_COMMAND); } next_ptr = (priv(dev)->tx_head + 1) & 15; local_irq_restore(flags); dev_kfree_skb(skb); if (priv(dev)->tx_tail == next_ptr) netif_stop_queue(dev); out: return NETDEV_TX_OK; } static irqreturn_t ether3_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; unsigned int status, handled = IRQ_NONE; #if NET_DEBUG > 1 if(net_debug & DEBUG_INT) printk("eth3irq: %d ", irq); #endif status = ether3_inw(REG_STATUS); if (status & STAT_INTRX) { ether3_outw(CMD_ACKINTRX | priv(dev)->regs.command, REG_COMMAND); ether3_rx(dev, 12); handled = IRQ_HANDLED; } if (status & STAT_INTTX) { ether3_outw(CMD_ACKINTTX | priv(dev)->regs.command, 
REG_COMMAND); ether3_tx(dev); handled = IRQ_HANDLED; } #if NET_DEBUG > 1 if(net_debug & DEBUG_INT) printk("done\n"); #endif return handled; } /* * If we have a good packet(s), get it/them out of the buffers. */ static int ether3_rx(struct net_device *dev, unsigned int maxcnt) { unsigned int next_ptr = priv(dev)->rx_head, received = 0; ether3_ledon(dev); do { unsigned int this_ptr, status; unsigned char addrs[16]; /* * read the first 16 bytes from the buffer. * This contains the status bytes etc and ethernet addresses, * and we also check the source ethernet address to see if * it originated from us. */ { unsigned int temp_ptr; ether3_setbuffer(dev, buffer_read, next_ptr); temp_ptr = ether3_readword(dev); status = ether3_readword(dev); if ((status & (RXSTAT_DONE | RXHDR_CHAINCONTINUE | RXHDR_RECEIVE)) != (RXSTAT_DONE | RXHDR_CHAINCONTINUE) || !temp_ptr) break; this_ptr = next_ptr + 4; next_ptr = ntohs(temp_ptr); } ether3_setbuffer(dev, buffer_read, this_ptr); ether3_readbuffer(dev, addrs+2, 12); if (next_ptr < RX_START || next_ptr >= RX_END) { printk("%s: bad next pointer @%04X: ", dev->name, priv(dev)->rx_head); printk("%02X %02X %02X %02X ", next_ptr >> 8, next_ptr & 255, status & 255, status >> 8); printk("%pM %pM\n", addrs + 2, addrs + 8); next_ptr = priv(dev)->rx_head; break; } /* * ignore our own packets... */ if (!(*(unsigned long *)&dev->dev_addr[0] ^ *(unsigned long *)&addrs[2+6]) && !(*(unsigned short *)&dev->dev_addr[4] ^ *(unsigned short *)&addrs[2+10])) { maxcnt ++; /* compensate for loopedback packet */ ether3_outw(next_ptr >> 8, REG_RECVEND); } else if (!(status & (RXSTAT_OVERSIZE|RXSTAT_CRCERROR|RXSTAT_DRIBBLEERROR|RXSTAT_SHORTPACKET))) { unsigned int length = next_ptr - this_ptr; struct sk_buff *skb; if (next_ptr <= this_ptr) length += RX_END - RX_START; skb = netdev_alloc_skb(dev, length + 2); if (skb) { unsigned char *buf; skb_reserve(skb, 2); buf = skb_put(skb, length); ether3_readbuffer(dev, buf + 12, length - 12); ether3_outw(next_ptr >> 8, REG_RECVEND); *(unsigned short *)(buf + 0) = *(unsigned short *)(addrs + 2); *(unsigned long *)(buf + 2) = *(unsigned long *)(addrs + 4); *(unsigned long *)(buf + 6) = *(unsigned long *)(addrs + 8); *(unsigned short *)(buf + 10) = *(unsigned short *)(addrs + 12); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); received ++; } else { ether3_outw(next_ptr >> 8, REG_RECVEND); dev->stats.rx_dropped++; goto done; } } else { struct net_device_stats *stats = &dev->stats; ether3_outw(next_ptr >> 8, REG_RECVEND); if (status & RXSTAT_OVERSIZE) stats->rx_over_errors ++; if (status & RXSTAT_CRCERROR) stats->rx_crc_errors ++; if (status & RXSTAT_DRIBBLEERROR) stats->rx_fifo_errors ++; if (status & RXSTAT_SHORTPACKET) stats->rx_length_errors ++; stats->rx_errors++; } } while (-- maxcnt); done: dev->stats.rx_packets += received; priv(dev)->rx_head = next_ptr; /* * If rx went off line, then that means that the buffer may be full. We * have dropped at least one packet. 
*/ if (!(ether3_inw(REG_STATUS) & STAT_RXON)) { dev->stats.rx_dropped++; ether3_outw(next_ptr, REG_RECVPTR); ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND); } return maxcnt; } /* * Update stats for the transmitted packet(s) */ static void ether3_tx(struct net_device *dev) { unsigned int tx_tail = priv(dev)->tx_tail; int max_work = 14; do { unsigned long status; /* * Read the packet header */ ether3_setbuffer(dev, buffer_read, tx_tail * 0x600); status = ether3_readlong(dev); /* * Check to see if this packet has been transmitted */ if ((status & (TXSTAT_DONE | TXHDR_TRANSMIT)) != (TXSTAT_DONE | TXHDR_TRANSMIT)) break; /* * Update errors */ if (!(status & (TXSTAT_BABBLED | TXSTAT_16COLLISIONS))) dev->stats.tx_packets++; else { dev->stats.tx_errors++; if (status & TXSTAT_16COLLISIONS) dev->stats.collisions += 16; if (status & TXSTAT_BABBLED) dev->stats.tx_fifo_errors++; } tx_tail = (tx_tail + 1) & 15; } while (--max_work); if (priv(dev)->tx_tail != tx_tail) { priv(dev)->tx_tail = tx_tail; netif_wake_queue(dev); } } static void ether3_banner(void) { static unsigned version_printed = 0; if (net_debug && version_printed++ == 0) printk(KERN_INFO "%s", version); } static const struct net_device_ops ether3_netdev_ops = { .ndo_open = ether3_open, .ndo_stop = ether3_close, .ndo_start_xmit = ether3_sendpacket, .ndo_set_rx_mode = ether3_setmulticastlist, .ndo_tx_timeout = ether3_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, }; static int ether3_probe(struct expansion_card *ec, const struct ecard_id *id) { const struct ether3_data *data = id->data; struct net_device *dev; int bus_type, ret; u8 addr[ETH_ALEN]; ether3_banner(); ret = ecard_request_resources(ec); if (ret) goto out; dev = alloc_etherdev(sizeof(struct dev_priv)); if (!dev) { ret = -ENOMEM; goto release; } SET_NETDEV_DEV(dev, &ec->dev); priv(dev)->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); if (!priv(dev)->base) { ret = -ENOMEM; goto free; } ec->irqaddr = priv(dev)->base + data->base_offset; ec->irqmask = 0xf0; priv(dev)->seeq = priv(dev)->base + data->base_offset; dev->irq = ec->irq; ether3_addr(addr, ec); eth_hw_addr_set(dev, addr); priv(dev)->dev = dev; timer_setup(&priv(dev)->timer, ether3_ledoff, 0); /* Reset card... */ ether3_outb(0x80, REG_CONFIG2 + 4); bus_type = BUS_UNKNOWN; udelay(4); /* Test using Receive Pointer (16-bit register) to find out * how the ether3 is connected to the bus... 
*/ if (ether3_probe_bus_8(dev, 0x100) && ether3_probe_bus_8(dev, 0x201)) bus_type = BUS_8; if (bus_type == BUS_UNKNOWN && ether3_probe_bus_16(dev, 0x101) && ether3_probe_bus_16(dev, 0x201)) bus_type = BUS_16; switch (bus_type) { case BUS_UNKNOWN: printk(KERN_ERR "%s: unable to identify bus width\n", dev->name); ret = -ENODEV; goto free; case BUS_8: printk(KERN_ERR "%s: %s found, but is an unsupported " "8-bit card\n", dev->name, data->name); ret = -ENODEV; goto free; default: break; } if (ether3_init_2(dev)) { ret = -ENODEV; goto free; } dev->netdev_ops = &ether3_netdev_ops; dev->watchdog_timeo = 5 * HZ / 100; ret = register_netdev(dev); if (ret) goto free; printk("%s: %s in slot %d, %pM\n", dev->name, data->name, ec->slot_no, dev->dev_addr); ecard_set_drvdata(ec, dev); return 0; free: free_netdev(dev); release: ecard_release_resources(ec); out: return ret; } static void ether3_remove(struct expansion_card *ec) { struct net_device *dev = ecard_get_drvdata(ec); ecard_set_drvdata(ec, NULL); unregister_netdev(dev); free_netdev(dev); ecard_release_resources(ec); } static struct ether3_data ether3 = { .name = "ether3", .base_offset = 0, }; static struct ether3_data etherb = { .name = "etherb", .base_offset = 0x800, }; static const struct ecard_id ether3_ids[] = { { MANU_ANT2, PROD_ANT_ETHER3, &ether3 }, { MANU_ANT, PROD_ANT_ETHER3, &ether3 }, { MANU_ANT, PROD_ANT_ETHERB, &etherb }, { 0xffff, 0xffff } }; static struct ecard_driver ether3_driver = { .probe = ether3_probe, .remove = ether3_remove, .id_table = ether3_ids, .drv = { .name = "ether3", }, }; static int __init ether3_init(void) { return ecard_register_driver(&ether3_driver); } static void __exit ether3_exit(void) { ecard_remove_driver(&ether3_driver); } module_init(ether3_init); module_exit(ether3_exit); MODULE_LICENSE("GPL");
linux-master
drivers/net/ethernet/seeq/ether3.c
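A minimal standalone sketch (not part of the ether3 driver above) of the transmit-ring index arithmetic that ether3_sendpacket() relies on: a 16-slot ring whose head/tail indices wrap with "& 15", with one slot always left unused so that head catching tail means "full". All names here (TX_SLOTS, SLOT_BYTES, ring_state, ring_*) are hypothetical and exist only for this illustration.

/*
 * Illustrative sketch only: mirrors the "& 15" ring indexing and the
 * 0x600-byte slot offsets used by the driver's tx path, under the
 * assumptions stated above.
 */
#include <stdio.h>

#define TX_SLOTS	16	/* must be a power of two for the & mask */
#define SLOT_BYTES	0x600	/* per-packet buffer area, as in the driver */

struct ring_state {
	unsigned int head;	/* next slot to fill */
	unsigned int tail;	/* oldest slot not yet completed */
};

/* True when one more packet can be queued without catching the tail. */
static int ring_can_queue(const struct ring_state *r)
{
	return ((r->head + 1) & (TX_SLOTS - 1)) != r->tail;
}

/* Queue one packet; returns the byte offset of the slot used, or -1 if full. */
static int ring_queue(struct ring_state *r)
{
	unsigned int slot;

	if (!ring_can_queue(r))
		return -1;		/* ring full: caller would stop the queue */
	slot = r->head;
	r->head = (r->head + 1) & (TX_SLOTS - 1);
	return (int)(slot * SLOT_BYTES);	/* like ptr = 0x600 * tx_head */
}

int main(void)
{
	struct ring_state r = { 0, 0 };
	int queued = 0, off;

	/* Only 15 of the 16 slots are usable before the ring reports full. */
	while ((off = ring_queue(&r)) >= 0) {
		queued++;
		printf("slot offset 0x%04x\n", (unsigned int)off);
	}
	printf("queued %d packets before the ring filled\n", queued);
	return 0;
}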
// SPDX-License-Identifier: GPL-2.0-only /* * sgiseeq.c: Seeq8003 ethernet driver for SGI machines. * * Copyright (C) 1996 David S. Miller ([email protected]) */ #undef DEBUG #include <linux/dma-mapping.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/platform_device.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <asm/sgi/hpc3.h> #include <asm/sgi/ip22.h> #include <asm/sgi/seeq.h> #include "sgiseeq.h" static char *sgiseeqstr = "SGI Seeq8003"; /* * If you want speed, you do something silly, it always has worked for me. So, * with that in mind, I've decided to make this driver look completely like a * stupid Lance from a driver architecture perspective. Only difference is that * here our "ring buffer" looks and acts like a real Lance one does but is * laid out like how the HPC DMA and the Seeq want it to. You'd be surprised * how a stupid idea like this can pay off in performance, not to mention * making this driver 2,000 times easier to write. ;-) */ /* Tune these if we tend to run out often etc. */ #define SEEQ_RX_BUFFERS 16 #define SEEQ_TX_BUFFERS 16 #define PKT_BUF_SZ 1584 #define NEXT_RX(i) (((i) + 1) & (SEEQ_RX_BUFFERS - 1)) #define NEXT_TX(i) (((i) + 1) & (SEEQ_TX_BUFFERS - 1)) #define PREV_RX(i) (((i) - 1) & (SEEQ_RX_BUFFERS - 1)) #define PREV_TX(i) (((i) - 1) & (SEEQ_TX_BUFFERS - 1)) #define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \ sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \ sp->tx_old - sp->tx_new - 1) #define VIRT_TO_DMA(sp, v) ((sp)->srings_dma + \ (dma_addr_t)((unsigned long)(v) - \ (unsigned long)((sp)->rx_desc))) /* Copy frames shorter than rx_copybreak, otherwise pass on up in * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha). */ static int rx_copybreak = 100; #define PAD_SIZE (128 - sizeof(struct hpc_dma_desc) - sizeof(void *)) struct sgiseeq_rx_desc { volatile struct hpc_dma_desc rdma; u8 padding[PAD_SIZE]; struct sk_buff *skb; }; struct sgiseeq_tx_desc { volatile struct hpc_dma_desc tdma; u8 padding[PAD_SIZE]; struct sk_buff *skb; }; /* * Warning: This structure is laid out in a certain way because HPC dma * descriptors must be 8-byte aligned. So don't touch this without * some care. */ struct sgiseeq_init_block { /* Note the name ;-) */ struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS]; struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS]; }; struct sgiseeq_private { struct sgiseeq_init_block *srings; dma_addr_t srings_dma; /* Ptrs to the descriptors in uncached space. */ struct sgiseeq_rx_desc *rx_desc; struct sgiseeq_tx_desc *tx_desc; char *name; struct hpc3_ethregs *hregs; struct sgiseeq_regs *sregs; /* Ring entry counters. 
*/ unsigned int rx_new, tx_new; unsigned int rx_old, tx_old; int is_edlc; unsigned char control; unsigned char mode; spinlock_t tx_lock; }; static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr) { struct sgiseeq_private *sp = netdev_priv(dev); dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr), sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL); } static inline void dma_sync_desc_dev(struct net_device *dev, void *addr) { struct sgiseeq_private *sp = netdev_priv(dev); dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr), sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL); } static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs) { hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ; udelay(20); hregs->reset = 0; } static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs, struct sgiseeq_regs *sregs) { hregs->rx_ctrl = hregs->tx_ctrl = 0; hpc3_eth_reset(hregs); } #define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \ SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC) static inline void seeq_go(struct sgiseeq_private *sp, struct hpc3_ethregs *hregs, struct sgiseeq_regs *sregs) { sregs->rstat = sp->mode | RSTAT_GO_BITS; hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE; } static inline void __sgiseeq_set_mac_address(struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); struct sgiseeq_regs *sregs = sp->sregs; int i; sregs->tstat = SEEQ_TCMD_RB0; for (i = 0; i < 6; i++) sregs->rw.eth_addr[i] = dev->dev_addr[i]; } static int sgiseeq_set_mac_address(struct net_device *dev, void *addr) { struct sgiseeq_private *sp = netdev_priv(dev); struct sockaddr *sa = addr; eth_hw_addr_set(dev, sa->sa_data); spin_lock_irq(&sp->tx_lock); __sgiseeq_set_mac_address(dev); spin_unlock_irq(&sp->tx_lock); return 0; } #define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD) #define RCNTCFG_INIT (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE) #define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT)) static int seeq_init_ring(struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); int i; netif_stop_queue(dev); sp->rx_new = sp->tx_new = 0; sp->rx_old = sp->tx_old = 0; __sgiseeq_set_mac_address(dev); /* Setup tx ring. */ for(i = 0; i < SEEQ_TX_BUFFERS; i++) { sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT; dma_sync_desc_dev(dev, &sp->tx_desc[i]); } /* And now the rx ring. */ for (i = 0; i < SEEQ_RX_BUFFERS; i++) { if (!sp->rx_desc[i].skb) { dma_addr_t dma_addr; struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ); if (skb == NULL) return -ENOMEM; skb_reserve(skb, 2); dma_addr = dma_map_single(dev->dev.parent, skb->data - 2, PKT_BUF_SZ, DMA_FROM_DEVICE); sp->rx_desc[i].skb = skb; sp->rx_desc[i].rdma.pbuf = dma_addr; } sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT; dma_sync_desc_dev(dev, &sp->rx_desc[i]); } sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR; dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]); return 0; } static void seeq_purge_ring(struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); int i; /* clear tx ring. */ for (i = 0; i < SEEQ_TX_BUFFERS; i++) { if (sp->tx_desc[i].skb) { dev_kfree_skb(sp->tx_desc[i].skb); sp->tx_desc[i].skb = NULL; } } /* And now the rx ring. 
*/ for (i = 0; i < SEEQ_RX_BUFFERS; i++) { if (sp->rx_desc[i].skb) { dev_kfree_skb(sp->rx_desc[i].skb); sp->rx_desc[i].skb = NULL; } } } #ifdef DEBUG static struct sgiseeq_private *gpriv; static struct net_device *gdev; static void sgiseeq_dump_rings(void) { static int once; struct sgiseeq_rx_desc *r = gpriv->rx_desc; struct sgiseeq_tx_desc *t = gpriv->tx_desc; struct hpc3_ethregs *hregs = gpriv->hregs; int i; if (once) return; once++; printk("RING DUMP:\n"); for (i = 0; i < SEEQ_RX_BUFFERS; i++) { printk("RX [%d]: @(%p) [%08x,%08x,%08x] ", i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo, r[i].rdma.pnext); i += 1; printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n", i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo, r[i].rdma.pnext); } for (i = 0; i < SEEQ_TX_BUFFERS; i++) { printk("TX [%d]: @(%p) [%08x,%08x,%08x] ", i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo, t[i].tdma.pnext); i += 1; printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n", i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo, t[i].tdma.pnext); } printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n", gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old); printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n", hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl); printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n", hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl); } #endif #define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF) #define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2) static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp, struct sgiseeq_regs *sregs) { struct hpc3_ethregs *hregs = sp->hregs; int err; reset_hpc3_and_seeq(hregs, sregs); err = seeq_init_ring(dev); if (err) return err; /* Setup to field the proper interrupt types. */ if (sp->is_edlc) { sregs->tstat = TSTAT_INIT_EDLC; sregs->rw.wregs.control = sp->control; sregs->rw.wregs.frame_gap = 0; } else { sregs->tstat = TSTAT_INIT_SEEQ; } hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc); hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc); seeq_go(sp, hregs, sregs); return 0; } static void record_rx_errors(struct net_device *dev, unsigned char status) { if (status & SEEQ_RSTAT_OVERF || status & SEEQ_RSTAT_SFRAME) dev->stats.rx_over_errors++; if (status & SEEQ_RSTAT_CERROR) dev->stats.rx_crc_errors++; if (status & SEEQ_RSTAT_DERROR) dev->stats.rx_frame_errors++; if (status & SEEQ_RSTAT_REOF) dev->stats.rx_errors++; } static inline void rx_maybe_restart(struct sgiseeq_private *sp, struct hpc3_ethregs *hregs, struct sgiseeq_regs *sregs) { if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) { hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new); seeq_go(sp, hregs, sregs); } } static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp, struct hpc3_ethregs *hregs, struct sgiseeq_regs *sregs) { struct sgiseeq_rx_desc *rd; struct sk_buff *skb = NULL; struct sk_buff *newskb; unsigned char pkt_status; int len = 0; unsigned int orig_end = PREV_RX(sp->rx_new); /* Service every received packet. */ rd = &sp->rx_desc[sp->rx_new]; dma_sync_desc_cpu(dev, rd); while (!(rd->rdma.cntinfo & HPCDMA_OWN)) { len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3; dma_unmap_single(dev->dev.parent, rd->rdma.pbuf, PKT_BUF_SZ, DMA_FROM_DEVICE); pkt_status = rd->skb->data[len]; if (pkt_status & SEEQ_RSTAT_FIG) { /* Packet is OK. 
*/ /* We don't want to receive our own packets */ if (!ether_addr_equal(rd->skb->data + 6, dev->dev_addr)) { if (len > rx_copybreak) { skb = rd->skb; newskb = netdev_alloc_skb(dev, PKT_BUF_SZ); if (!newskb) { newskb = skb; skb = NULL; goto memory_squeeze; } skb_reserve(newskb, 2); } else { skb = netdev_alloc_skb_ip_align(dev, len); if (skb) skb_copy_to_linear_data(skb, rd->skb->data, len); newskb = rd->skb; } memory_squeeze: if (skb) { skb_put(skb, len); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; } else { dev->stats.rx_dropped++; } } else { /* Silently drop my own packets */ newskb = rd->skb; } } else { record_rx_errors(dev, pkt_status); newskb = rd->skb; } rd->skb = newskb; rd->rdma.pbuf = dma_map_single(dev->dev.parent, newskb->data - 2, PKT_BUF_SZ, DMA_FROM_DEVICE); /* Return the entry to the ring pool. */ rd->rdma.cntinfo = RCNTINFO_INIT; sp->rx_new = NEXT_RX(sp->rx_new); dma_sync_desc_dev(dev, rd); rd = &sp->rx_desc[sp->rx_new]; dma_sync_desc_cpu(dev, rd); } dma_sync_desc_dev(dev, rd); dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]); sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR); dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]); dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]); sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR; dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]); rx_maybe_restart(sp, hregs, sregs); } static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp, struct sgiseeq_regs *sregs) { if (sp->is_edlc) { sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT); sregs->rw.wregs.control = sp->control; } } static inline void kick_tx(struct net_device *dev, struct sgiseeq_private *sp, struct hpc3_ethregs *hregs) { struct sgiseeq_tx_desc *td; int i = sp->tx_old; /* If the HPC aint doin nothin, and there are more packets * with ETXD cleared and XIU set we must make very certain * that we restart the HPC else we risk locking up the * adapter. The following code is only safe iff the HPCDMA * is not active! */ td = &sp->tx_desc[i]; dma_sync_desc_cpu(dev, td); while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) == (HPCDMA_XIU | HPCDMA_ETXD)) { i = NEXT_TX(i); td = &sp->tx_desc[i]; dma_sync_desc_cpu(dev, td); } if (td->tdma.cntinfo & HPCDMA_XIU) { dma_sync_desc_dev(dev, td); hregs->tx_ndptr = VIRT_TO_DMA(sp, td); hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE; } } static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp, struct hpc3_ethregs *hregs, struct sgiseeq_regs *sregs) { struct sgiseeq_tx_desc *td; unsigned long status = hregs->tx_ctrl; int j; tx_maybe_reset_collisions(sp, sregs); if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) { /* Oops, HPC detected some sort of error. */ if (status & SEEQ_TSTAT_R16) dev->stats.tx_aborted_errors++; if (status & SEEQ_TSTAT_UFLOW) dev->stats.tx_fifo_errors++; if (status & SEEQ_TSTAT_LCLS) dev->stats.collisions++; } /* Ack 'em... 
*/ for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) { td = &sp->tx_desc[j]; dma_sync_desc_cpu(dev, td); if (!(td->tdma.cntinfo & (HPCDMA_XIU))) break; if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) { dma_sync_desc_dev(dev, td); if (!(status & HPC3_ETXCTRL_ACTIVE)) { hregs->tx_ndptr = VIRT_TO_DMA(sp, td); hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE; } break; } dev->stats.tx_packets++; sp->tx_old = NEXT_TX(sp->tx_old); td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE); td->tdma.cntinfo |= HPCDMA_EOX; if (td->skb) { dev_kfree_skb_any(td->skb); td->skb = NULL; } dma_sync_desc_dev(dev, td); } } static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *) dev_id; struct sgiseeq_private *sp = netdev_priv(dev); struct hpc3_ethregs *hregs = sp->hregs; struct sgiseeq_regs *sregs = sp->sregs; spin_lock(&sp->tx_lock); /* Ack the IRQ and set software state. */ hregs->reset = HPC3_ERST_CLRIRQ; /* Always check for received packets. */ sgiseeq_rx(dev, sp, hregs, sregs); /* Only check for tx acks if we have something queued. */ if (sp->tx_old != sp->tx_new) sgiseeq_tx(dev, sp, hregs, sregs); if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) { netif_wake_queue(dev); } spin_unlock(&sp->tx_lock); return IRQ_HANDLED; } static int sgiseeq_open(struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); struct sgiseeq_regs *sregs = sp->sregs; unsigned int irq = dev->irq; int err; if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) { printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq); return -EAGAIN; } err = init_seeq(dev, sp, sregs); if (err) goto out_free_irq; netif_start_queue(dev); return 0; out_free_irq: free_irq(irq, dev); return err; } static int sgiseeq_close(struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); struct sgiseeq_regs *sregs = sp->sregs; unsigned int irq = dev->irq; netif_stop_queue(dev); /* Shutdown the Seeq. */ reset_hpc3_and_seeq(sp->hregs, sregs); free_irq(irq, dev); seeq_purge_ring(dev); return 0; } static inline int sgiseeq_reset(struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); struct sgiseeq_regs *sregs = sp->sregs; int err; err = init_seeq(dev, sp, sregs); if (err) return err; netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); return 0; } static netdev_tx_t sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); struct hpc3_ethregs *hregs = sp->hregs; unsigned long flags; struct sgiseeq_tx_desc *td; int len, entry; spin_lock_irqsave(&sp->tx_lock, flags); /* Setup... */ len = skb->len; if (len < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN)) { spin_unlock_irqrestore(&sp->tx_lock, flags); return NETDEV_TX_OK; } len = ETH_ZLEN; } dev->stats.tx_bytes += len; entry = sp->tx_new; td = &sp->tx_desc[entry]; dma_sync_desc_cpu(dev, td); /* Create entry. There are so many races with adding a new * descriptor to the chain: * 1) Assume that the HPC is off processing a DMA chain while * we are changing all of the following. * 2) Do no allow the HPC to look at a new descriptor until * we have completely set up it's state. This means, do * not clear HPCDMA_EOX in the current last descritptor * until the one we are adding looks consistent and could * be processes right now. * 3) The tx interrupt code must notice when we've added a new * entry and the HPC got to the end of the chain before we * added this new entry and restarted it. 
*/ td->skb = skb; td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data, len, DMA_TO_DEVICE); td->tdma.cntinfo = (len & HPCDMA_BCNT) | HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX; dma_sync_desc_dev(dev, td); if (sp->tx_old != sp->tx_new) { struct sgiseeq_tx_desc *backend; backend = &sp->tx_desc[PREV_TX(sp->tx_new)]; dma_sync_desc_cpu(dev, backend); backend->tdma.cntinfo &= ~HPCDMA_EOX; dma_sync_desc_dev(dev, backend); } sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */ /* Maybe kick the HPC back into motion. */ if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE)) kick_tx(dev, sp, hregs); if (!TX_BUFFS_AVAIL(sp)) netif_stop_queue(dev); spin_unlock_irqrestore(&sp->tx_lock, flags); return NETDEV_TX_OK; } static void timeout(struct net_device *dev, unsigned int txqueue) { printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name); sgiseeq_reset(dev); netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } static void sgiseeq_set_multicast(struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); unsigned char oldmode = sp->mode; if(dev->flags & IFF_PROMISC) sp->mode = SEEQ_RCMD_RANY; else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) sp->mode = SEEQ_RCMD_RBMCAST; else sp->mode = SEEQ_RCMD_RBCAST; /* XXX I know this sucks, but is there a better way to reprogram * XXX the receiver? At least, this shouldn't happen too often. */ if (oldmode != sp->mode) sgiseeq_reset(dev); } static inline void setup_tx_ring(struct net_device *dev, struct sgiseeq_tx_desc *buf, int nbufs) { struct sgiseeq_private *sp = netdev_priv(dev); int i = 0; while (i < (nbufs - 1)) { buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1); buf[i].tdma.pbuf = 0; dma_sync_desc_dev(dev, &buf[i]); i++; } buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf); dma_sync_desc_dev(dev, &buf[i]); } static inline void setup_rx_ring(struct net_device *dev, struct sgiseeq_rx_desc *buf, int nbufs) { struct sgiseeq_private *sp = netdev_priv(dev); int i = 0; while (i < (nbufs - 1)) { buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1); buf[i].rdma.pbuf = 0; dma_sync_desc_dev(dev, &buf[i]); i++; } buf[i].rdma.pbuf = 0; buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf); dma_sync_desc_dev(dev, &buf[i]); } static const struct net_device_ops sgiseeq_netdev_ops = { .ndo_open = sgiseeq_open, .ndo_stop = sgiseeq_close, .ndo_start_xmit = sgiseeq_start_xmit, .ndo_tx_timeout = timeout, .ndo_set_rx_mode = sgiseeq_set_multicast, .ndo_set_mac_address = sgiseeq_set_mac_address, .ndo_validate_addr = eth_validate_addr, }; static int sgiseeq_probe(struct platform_device *pdev) { struct sgiseeq_platform_data *pd = dev_get_platdata(&pdev->dev); struct hpc3_regs *hpcregs = pd->hpc; struct sgiseeq_init_block *sr; unsigned int irq = pd->irq; struct sgiseeq_private *sp; struct net_device *dev; int err; dev = alloc_etherdev(sizeof (struct sgiseeq_private)); if (!dev) { err = -ENOMEM; goto err_out; } platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); sp = netdev_priv(dev); /* Make private data page aligned */ sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings), &sp->srings_dma, DMA_BIDIRECTIONAL, GFP_KERNEL); if (!sr) { printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n"); err = -ENOMEM; goto err_out_free_dev; } sp->srings = sr; sp->rx_desc = sp->srings->rxvector; sp->tx_desc = sp->srings->txvector; spin_lock_init(&sp->tx_lock); /* A couple calculations now, saves many cycles later. 
*/ setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS); setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS); eth_hw_addr_set(dev, pd->mac); #ifdef DEBUG gpriv = sp; gdev = dev; #endif sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0]; sp->hregs = &hpcregs->ethregs; sp->name = sgiseeqstr; sp->mode = SEEQ_RCMD_RBCAST; /* Setup PIO and DMA transfer timing */ sp->hregs->pconfig = 0x161; sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP | HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026; /* Setup PIO and DMA transfer timing */ sp->hregs->pconfig = 0x161; sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP | HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026; /* Reset the chip. */ hpc3_eth_reset(sp->hregs); sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff); if (sp->is_edlc) sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT | SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT | SEEQ_CTRL_ENCARR; dev->netdev_ops = &sgiseeq_netdev_ops; dev->watchdog_timeo = (200 * HZ) / 1000; dev->irq = irq; if (register_netdev(dev)) { printk(KERN_ERR "Sgiseeq: Cannot register net device, " "aborting.\n"); err = -ENODEV; goto err_out_free_attrs; } printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr); return 0; err_out_free_attrs: dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings, sp->srings_dma, DMA_BIDIRECTIONAL); err_out_free_dev: free_netdev(dev); err_out: return err; } static int sgiseeq_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sgiseeq_private *sp = netdev_priv(dev); unregister_netdev(dev); dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings, sp->srings_dma, DMA_BIDIRECTIONAL); free_netdev(dev); return 0; } static struct platform_driver sgiseeq_driver = { .probe = sgiseeq_probe, .remove = sgiseeq_remove, .driver = { .name = "sgiseeq", } }; module_platform_driver(sgiseeq_driver); MODULE_DESCRIPTION("SGI Seeq 8003 driver"); MODULE_AUTHOR("Linux/MIPS Mailing List <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:sgiseeq");
linux-master
drivers/net/ethernet/seeq/sgiseeq.c
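A small standalone check (not from the sgiseeq driver above) of the TX_BUFFS_AVAIL() expression used in that file: it counts free transmit descriptors while keeping one slot reserved, and is equivalent to the usual masked producer/consumer formula. BUFS and the helper names below are hypothetical, chosen only to mirror SEEQ_TX_BUFFERS = 16.

/*
 * Illustrative sketch only: compares the driver-style availability
 * expression against a masked reference for every (tx_old, tx_new) pair,
 * under the assumptions stated above.
 */
#include <assert.h>
#include <stdio.h>

#define BUFS 16

/* Same shape as TX_BUFFS_AVAIL(sp) in the driver, with plain ints. */
static int avail_driver_style(int tx_old, int tx_new)
{
	return (tx_old <= tx_new) ?
		tx_old + (BUFS - 1) - tx_new :
		tx_old - tx_new - 1;
}

/* Reference: free slots = (tx_old - tx_new - 1) mod BUFS, one slot reserved. */
static int avail_reference(int tx_old, int tx_new)
{
	return (int)(((unsigned int)(tx_old - tx_new - 1)) & (BUFS - 1));
}

int main(void)
{
	int tx_old, tx_new;

	for (tx_old = 0; tx_old < BUFS; tx_old++)
		for (tx_new = 0; tx_new < BUFS; tx_new++)
			assert(avail_driver_style(tx_old, tx_new) ==
			       avail_reference(tx_old, tx_new));

	printf("ring full  (tx_old == tx_new + 1 mod %d): %d free\n",
	       BUFS, avail_driver_style(1, 0));
	printf("ring empty (tx_old == tx_new):             %d free\n",
	       avail_driver_style(0, 0));
	return 0;
}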
// SPDX-License-Identifier: GPL-2.0 /* sunvnet.c: Sun LDOM Virtual Network Driver. * * Copyright (C) 2007, 2008 David S. Miller <[email protected]> * Copyright (C) 2016-2017 Oracle. All rights reserved. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/mutex.h> #include <linux/highmem.h> #include <linux/if_vlan.h> #define CREATE_TRACE_POINTS #include <trace/events/sunvnet.h> #if IS_ENABLED(CONFIG_IPV6) #include <linux/icmpv6.h> #endif #include <net/ip.h> #include <net/gso.h> #include <net/icmp.h> #include <net/route.h> #include <asm/vio.h> #include <asm/ldc.h> #include "sunvnet_common.h" /* Heuristic for the number of times to exponentially backoff and * retry sending an LDC trigger when EAGAIN is encountered */ #define VNET_MAX_RETRIES 10 MODULE_AUTHOR("David S. Miller ([email protected])"); MODULE_DESCRIPTION("Sun LDOM virtual network support library"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.1"); static int __vnet_tx_trigger(struct vnet_port *port, u32 start); static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr) { return vio_dring_avail(dr, VNET_TX_RING_SIZE); } static int vnet_handle_unknown(struct vnet_port *port, void *arg) { struct vio_msg_tag *pkt = arg; pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n", pkt->type, pkt->stype, pkt->stype_env, pkt->sid); pr_err("Resetting connection\n"); ldc_disconnect(port->vio.lp); return -ECONNRESET; } static int vnet_port_alloc_tx_ring(struct vnet_port *port); int sunvnet_send_attr_common(struct vio_driver_state *vio) { struct vnet_port *port = to_vnet_port(vio); struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); struct vio_net_attr_info pkt; int framelen = ETH_FRAME_LEN; int i, err; err = vnet_port_alloc_tx_ring(to_vnet_port(vio)); if (err) return err; memset(&pkt, 0, sizeof(pkt)); pkt.tag.type = VIO_TYPE_CTRL; pkt.tag.stype = VIO_SUBTYPE_INFO; pkt.tag.stype_env = VIO_ATTR_INFO; pkt.tag.sid = vio_send_sid(vio); if (vio_version_before(vio, 1, 2)) pkt.xfer_mode = VIO_DRING_MODE; else pkt.xfer_mode = VIO_NEW_DRING_MODE; pkt.addr_type = VNET_ADDR_ETHERMAC; pkt.ack_freq = 0; for (i = 0; i < 6; i++) pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); if (vio_version_after(vio, 1, 3)) { if (port->rmtu) { port->rmtu = min(VNET_MAXPACKET, port->rmtu); pkt.mtu = port->rmtu; } else { port->rmtu = VNET_MAXPACKET; pkt.mtu = port->rmtu; } if (vio_version_after_eq(vio, 1, 6)) pkt.options = VIO_TX_DRING; } else if (vio_version_before(vio, 1, 3)) { pkt.mtu = framelen; } else { /* v1.3 */ pkt.mtu = framelen + VLAN_HLEN; } pkt.cflags = 0; if (vio_version_after_eq(vio, 1, 7) && port->tso) { pkt.cflags |= VNET_LSO_IPV4_CAPAB; if (!port->tsolen) port->tsolen = VNET_MAXTSO; pkt.ipv4_lso_maxlen = port->tsolen; } pkt.plnk_updt = PHYSLINK_UPDATE_NONE; viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " "cflags[0x%04x] lso_max[%u]\n", pkt.xfer_mode, pkt.addr_type, (unsigned long long)pkt.addr, pkt.ack_freq, pkt.plnk_updt, pkt.options, (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen); return vio_ldc_send(vio, &pkt, sizeof(pkt)); } EXPORT_SYMBOL_GPL(sunvnet_send_attr_common); static int handle_attr_info(struct vio_driver_state *vio, struct vio_net_attr_info *pkt) { struct vnet_port *port = to_vnet_port(vio); u64 localmtu; u8 xfer_mode; viodbg(HS, "GOT NET ATTR 
xmode[0x%x] atype[0x%x] addr[%llx] " "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", pkt->xfer_mode, pkt->addr_type, (unsigned long long)pkt->addr, pkt->ack_freq, pkt->plnk_updt, pkt->options, (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, pkt->ipv4_lso_maxlen); pkt->tag.sid = vio_send_sid(vio); xfer_mode = pkt->xfer_mode; /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */ if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE) xfer_mode = VIO_NEW_DRING_MODE; /* MTU negotiation: * < v1.3 - ETH_FRAME_LEN exactly * > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change * pkt->mtu for ACK * = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly */ if (vio_version_before(vio, 1, 3)) { localmtu = ETH_FRAME_LEN; } else if (vio_version_after(vio, 1, 3)) { localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET; localmtu = min(pkt->mtu, localmtu); pkt->mtu = localmtu; } else { /* v1.3 */ localmtu = ETH_FRAME_LEN + VLAN_HLEN; } port->rmtu = localmtu; /* LSO negotiation */ if (vio_version_after_eq(vio, 1, 7)) port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB); else port->tso = false; if (port->tso) { if (!port->tsolen) port->tsolen = VNET_MAXTSO; port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen); if (port->tsolen < VNET_MINTSO) { port->tso = false; port->tsolen = 0; pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; } pkt->ipv4_lso_maxlen = port->tsolen; } else { pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; pkt->ipv4_lso_maxlen = 0; port->tsolen = 0; } /* for version >= 1.6, ACK packet mode we support */ if (vio_version_after_eq(vio, 1, 6)) { pkt->xfer_mode = VIO_NEW_DRING_MODE; pkt->options = VIO_TX_DRING; } if (!(xfer_mode | VIO_NEW_DRING_MODE) || pkt->addr_type != VNET_ADDR_ETHERMAC || pkt->mtu != localmtu) { viodbg(HS, "SEND NET ATTR NACK\n"); pkt->tag.stype = VIO_SUBTYPE_NACK; (void)vio_ldc_send(vio, pkt, sizeof(*pkt)); return -ECONNRESET; } viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] " "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] " "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", pkt->xfer_mode, pkt->addr_type, (unsigned long long)pkt->addr, pkt->ack_freq, pkt->plnk_updt, pkt->options, (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, pkt->ipv4_lso_maxlen); pkt->tag.stype = VIO_SUBTYPE_ACK; return vio_ldc_send(vio, pkt, sizeof(*pkt)); } static int handle_attr_ack(struct vio_driver_state *vio, struct vio_net_attr_info *pkt) { viodbg(HS, "GOT NET ATTR ACK\n"); return 0; } static int handle_attr_nack(struct vio_driver_state *vio, struct vio_net_attr_info *pkt) { viodbg(HS, "GOT NET ATTR NACK\n"); return -ECONNRESET; } int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg) { struct vio_net_attr_info *pkt = arg; switch (pkt->tag.stype) { case VIO_SUBTYPE_INFO: return handle_attr_info(vio, pkt); case VIO_SUBTYPE_ACK: return handle_attr_ack(vio, pkt); case VIO_SUBTYPE_NACK: return handle_attr_nack(vio, pkt); default: return -ECONNRESET; } } EXPORT_SYMBOL_GPL(sunvnet_handle_attr_common); void sunvnet_handshake_complete_common(struct vio_driver_state *vio) { struct vio_dring_state *dr; dr = &vio->drings[VIO_DRIVER_RX_RING]; dr->rcv_nxt = 1; dr->snd_nxt = 1; dr = &vio->drings[VIO_DRIVER_TX_RING]; dr->rcv_nxt = 1; dr->snd_nxt = 1; } EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common); /* The hypervisor interface that implements copying to/from imported * memory from another domain requires that copies are done to 8-byte * aligned buffers, and that the lengths of such copies are also 8-byte * multiples. 
* * So we align skb->data to an 8-byte multiple and pad-out the data * area so we can round the copy length up to the next multiple of * 8 for the copy. * * The transmitter puts the actual start of the packet 6 bytes into * the buffer it sends over, so that the IP headers after the ethernet * header are aligned properly. These 6 bytes are not in the descriptor * length, they are simply implied. This offset is represented using * the VNET_PACKET_SKIP macro. */ static struct sk_buff *alloc_and_align_skb(struct net_device *dev, unsigned int len) { struct sk_buff *skb; unsigned long addr, off; skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8); if (unlikely(!skb)) return NULL; addr = (unsigned long)skb->data; off = ((addr + 7UL) & ~7UL) - addr; if (off) skb_reserve(skb, off); return skb; } static inline void vnet_fullcsum_ipv4(struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); int offset = skb_transport_offset(skb); if (skb->protocol != htons(ETH_P_IP)) return; if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP) return; skb->ip_summed = CHECKSUM_NONE; skb->csum_level = 1; skb->csum = 0; if (iph->protocol == IPPROTO_TCP) { struct tcphdr *ptcp = tcp_hdr(skb); ptcp->check = 0; skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len - offset, IPPROTO_TCP, skb->csum); } else if (iph->protocol == IPPROTO_UDP) { struct udphdr *pudp = udp_hdr(skb); pudp->check = 0; skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len - offset, IPPROTO_UDP, skb->csum); } } #if IS_ENABLED(CONFIG_IPV6) static inline void vnet_fullcsum_ipv6(struct sk_buff *skb) { struct ipv6hdr *ip6h = ipv6_hdr(skb); int offset = skb_transport_offset(skb); if (skb->protocol != htons(ETH_P_IPV6)) return; if (ip6h->nexthdr != IPPROTO_TCP && ip6h->nexthdr != IPPROTO_UDP) return; skb->ip_summed = CHECKSUM_NONE; skb->csum_level = 1; skb->csum = 0; if (ip6h->nexthdr == IPPROTO_TCP) { struct tcphdr *ptcp = tcp_hdr(skb); ptcp->check = 0; skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); ptcp->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb->len - offset, IPPROTO_TCP, skb->csum); } else if (ip6h->nexthdr == IPPROTO_UDP) { struct udphdr *pudp = udp_hdr(skb); pudp->check = 0; skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); pudp->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb->len - offset, IPPROTO_UDP, skb->csum); } } #endif static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) { struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); unsigned int len = desc->size; unsigned int copy_len; struct sk_buff *skb; int maxlen; int err; err = -EMSGSIZE; if (port->tso && port->tsolen > port->rmtu) maxlen = port->tsolen; else maxlen = port->rmtu; if (unlikely(len < ETH_ZLEN || len > maxlen)) { dev->stats.rx_length_errors++; goto out_dropped; } skb = alloc_and_align_skb(dev, len); err = -ENOMEM; if (unlikely(!skb)) { dev->stats.rx_missed_errors++; goto out_dropped; } copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U; skb_put(skb, copy_len); err = ldc_copy(port->vio.lp, LDC_COPY_IN, skb->data, copy_len, 0, desc->cookies, desc->ncookies); if (unlikely(err < 0)) { dev->stats.rx_frame_errors++; goto out_free_skb; } skb_pull(skb, VNET_PACKET_SKIP); skb_trim(skb, len); skb->protocol = eth_type_trans(skb, dev); if (vio_version_after_eq(&port->vio, 1, 8)) { struct vio_net_dext *dext = vio_net_ext(desc); skb_reset_network_header(skb); if 
(dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { if (skb->protocol == ETH_P_IP) { struct iphdr *iph = ip_hdr(skb); iph->check = 0; ip_send_check(iph); } } if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && skb->ip_summed == CHECKSUM_NONE) { if (skb->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); int ihl = iph->ihl * 4; skb_set_transport_header(skb, ihl); vnet_fullcsum_ipv4(skb); #if IS_ENABLED(CONFIG_IPV6) } else if (skb->protocol == htons(ETH_P_IPV6)) { skb_set_transport_header(skb, sizeof(struct ipv6hdr)); vnet_fullcsum_ipv6(skb); #endif } } if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { skb->ip_summed = CHECKSUM_PARTIAL; skb->csum_level = 0; if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK) skb->csum_level = 1; } } skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL; if (unlikely(is_multicast_ether_addr(eth_hdr(skb)->h_dest))) dev->stats.multicast++; dev->stats.rx_packets++; dev->stats.rx_bytes += len; port->stats.rx_packets++; port->stats.rx_bytes += len; napi_gro_receive(&port->napi, skb); return 0; out_free_skb: kfree_skb(skb); out_dropped: dev->stats.rx_dropped++; return err; } static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, u32 start, u32 end, u8 vio_dring_state) { struct vio_dring_data hdr = { .tag = { .type = VIO_TYPE_DATA, .stype = VIO_SUBTYPE_ACK, .stype_env = VIO_DRING_DATA, .sid = vio_send_sid(&port->vio), }, .dring_ident = dr->ident, .start_idx = start, .end_idx = end, .state = vio_dring_state, }; int err, delay; int retries = 0; hdr.seq = dr->snd_nxt; delay = 1; do { err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); if (err > 0) { dr->snd_nxt++; break; } udelay(delay); if ((delay <<= 1) > 128) delay = 128; if (retries++ > VNET_MAX_RETRIES) { pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n", port->raddr[0], port->raddr[1], port->raddr[2], port->raddr[3], port->raddr[4], port->raddr[5]); break; } } while (err == -EAGAIN); if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) { port->stop_rx_idx = end; port->stop_rx = true; } else { port->stop_rx_idx = 0; port->stop_rx = false; } return err; } static struct vio_net_desc *get_rx_desc(struct vnet_port *port, struct vio_dring_state *dr, u32 index) { struct vio_net_desc *desc = port->vio.desc_buf; int err; err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size, (index * dr->entry_size), dr->cookies, dr->ncookies); if (err < 0) return ERR_PTR(err); return desc; } static int put_rx_desc(struct vnet_port *port, struct vio_dring_state *dr, struct vio_net_desc *desc, u32 index) { int err; err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size, (index * dr->entry_size), dr->cookies, dr->ncookies); if (err < 0) return err; return 0; } static int vnet_walk_rx_one(struct vnet_port *port, struct vio_dring_state *dr, u32 index, int *needs_ack) { struct vio_net_desc *desc = get_rx_desc(port, dr, index); struct vio_driver_state *vio = &port->vio; int err; BUG_ON(!desc); if (IS_ERR(desc)) return PTR_ERR(desc); if (desc->hdr.state != VIO_DESC_READY) return 1; dma_rmb(); viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", desc->hdr.state, desc->hdr.ack, desc->size, desc->ncookies, desc->cookies[0].cookie_addr, desc->cookies[0].cookie_size); err = vnet_rx_one(port, desc); if (err == -ECONNRESET) return err; trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid, index, desc->hdr.ack); desc->hdr.state = VIO_DESC_DONE; err = put_rx_desc(port, dr, desc, index); if (err < 0) return err; *needs_ack = desc->hdr.ack; return 0; } static int vnet_walk_rx(struct vnet_port 
*port, struct vio_dring_state *dr, u32 start, u32 end, int *npkts, int budget) { struct vio_driver_state *vio = &port->vio; int ack_start = -1, ack_end = -1; bool send_ack = true; end = (end == (u32)-1) ? vio_dring_prev(dr, start) : vio_dring_next(dr, end); viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end); while (start != end) { int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); if (err == -ECONNRESET) return err; if (err != 0) break; (*npkts)++; if (ack_start == -1) ack_start = start; ack_end = start; start = vio_dring_next(dr, start); if (ack && start != end) { err = vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_ACTIVE); if (err == -ECONNRESET) return err; ack_start = -1; } if ((*npkts) >= budget) { send_ack = false; break; } } if (unlikely(ack_start == -1)) { ack_end = vio_dring_prev(dr, start); ack_start = ack_end; } if (send_ack) { port->napi_resume = false; trace_vnet_tx_send_stopped_ack(port->vio._local_sid, port->vio._peer_sid, ack_end, *npkts); return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED); } else { trace_vnet_tx_defer_stopped_ack(port->vio._local_sid, port->vio._peer_sid, ack_end, *npkts); port->napi_resume = true; port->napi_stop_idx = ack_end; return 1; } } static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts, int budget) { struct vio_dring_data *pkt = msgbuf; struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; struct vio_driver_state *vio = &port->vio; viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n", pkt->tag.stype_env, pkt->seq, dr->rcv_nxt); if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) return 0; if (unlikely(pkt->seq != dr->rcv_nxt)) { pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n", pkt->seq, dr->rcv_nxt); return 0; } if (!port->napi_resume) dr->rcv_nxt++; /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx, npkts, budget); } static int idx_is_pending(struct vio_dring_state *dr, u32 end) { u32 idx = dr->cons; int found = 0; while (idx != dr->prod) { if (idx == end) { found = 1; break; } idx = vio_dring_next(dr, idx); } return found; } static int vnet_ack(struct vnet_port *port, void *msgbuf) { struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; struct vio_dring_data *pkt = msgbuf; struct net_device *dev; u32 end; struct vio_net_desc *desc; struct netdev_queue *txq; if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) return 0; end = pkt->end_idx; dev = VNET_PORT_TO_NET_DEVICE(port); netif_tx_lock(dev); if (unlikely(!idx_is_pending(dr, end))) { netif_tx_unlock(dev); return 0; } /* sync for race conditions with vnet_start_xmit() and tell xmit it * is time to send a trigger. */ trace_vnet_rx_stopped_ack(port->vio._local_sid, port->vio._peer_sid, end); dr->cons = vio_dring_next(dr, end); desc = vio_dring_entry(dr, dr->cons); if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) { /* vnet_start_xmit() just populated this dring but missed * sending the "start" LDC message to the consumer. * Send a "start" trigger on its behalf. 
*/ if (__vnet_tx_trigger(port, dr->cons) > 0) port->start_cons = false; else port->start_cons = true; } else { port->start_cons = true; } netif_tx_unlock(dev); txq = netdev_get_tx_queue(dev, port->q_index); if (unlikely(netif_tx_queue_stopped(txq) && vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) return 1; return 0; } static int vnet_nack(struct vnet_port *port, void *msgbuf) { /* XXX just reset or similar XXX */ return 0; } static int handle_mcast(struct vnet_port *port, void *msgbuf) { struct vio_net_mcast_info *pkt = msgbuf; struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); if (pkt->tag.stype != VIO_SUBTYPE_ACK) pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n", dev->name, pkt->tag.type, pkt->tag.stype, pkt->tag.stype_env, pkt->tag.sid); return 0; } /* If the queue is stopped, wake it up so that we'll * send out another START message at the next TX. */ static void maybe_tx_wakeup(struct vnet_port *port) { struct netdev_queue *txq; txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), port->q_index); __netif_tx_lock(txq, smp_processor_id()); if (likely(netif_tx_queue_stopped(txq))) netif_tx_wake_queue(txq); __netif_tx_unlock(txq); } bool sunvnet_port_is_up_common(struct vnet_port *vnet) { struct vio_driver_state *vio = &vnet->vio; return !!(vio->hs_state & VIO_HS_COMPLETE); } EXPORT_SYMBOL_GPL(sunvnet_port_is_up_common); static int vnet_event_napi(struct vnet_port *port, int budget) { struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); struct vio_driver_state *vio = &port->vio; int tx_wakeup, err; int npkts = 0; /* we don't expect any other bits */ BUG_ON(port->rx_event & ~(LDC_EVENT_DATA_READY | LDC_EVENT_RESET | LDC_EVENT_UP)); /* RESET takes precedent over any other event */ if (port->rx_event & LDC_EVENT_RESET) { /* a link went down */ if (port->vsw == 1) { netif_tx_stop_all_queues(dev); netif_carrier_off(dev); } vio_link_state_change(vio, LDC_EVENT_RESET); vnet_port_reset(port); vio_port_up(vio); /* If the device is running but its tx queue was * stopped (due to flow control), restart it. * This is necessary since vnet_port_reset() * clears the tx drings and thus we may never get * back a VIO_TYPE_DATA ACK packet - which is * the normal mechanism to restart the tx queue. 
*/ if (netif_running(dev)) maybe_tx_wakeup(port); port->rx_event = 0; port->stats.event_reset++; return 0; } if (port->rx_event & LDC_EVENT_UP) { /* a link came up */ if (port->vsw == 1) { netif_carrier_on(port->dev); netif_tx_start_all_queues(port->dev); } vio_link_state_change(vio, LDC_EVENT_UP); port->rx_event = 0; port->stats.event_up++; return 0; } err = 0; tx_wakeup = 0; while (1) { union { struct vio_msg_tag tag; u64 raw[8]; } msgbuf; if (port->napi_resume) { struct vio_dring_data *pkt = (struct vio_dring_data *)&msgbuf; struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; pkt->tag.type = VIO_TYPE_DATA; pkt->tag.stype = VIO_SUBTYPE_INFO; pkt->tag.stype_env = VIO_DRING_DATA; pkt->seq = dr->rcv_nxt; pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx); pkt->end_idx = -1; } else { err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); if (unlikely(err < 0)) { if (err == -ECONNRESET) vio_conn_reset(vio); break; } if (err == 0) break; viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", msgbuf.tag.type, msgbuf.tag.stype, msgbuf.tag.stype_env, msgbuf.tag.sid); err = vio_validate_sid(vio, &msgbuf.tag); if (err < 0) break; } if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { if (!sunvnet_port_is_up_common(port)) { /* failures like handshake_failure() * may have cleaned up dring, but * NAPI polling may bring us here. */ err = -ECONNRESET; break; } err = vnet_rx(port, &msgbuf, &npkts, budget); if (npkts >= budget) break; if (npkts == 0) break; } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { err = vnet_ack(port, &msgbuf); if (err > 0) tx_wakeup |= err; } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) { err = vnet_nack(port, &msgbuf); } } else if (msgbuf.tag.type == VIO_TYPE_CTRL) { if (msgbuf.tag.stype_env == VNET_MCAST_INFO) err = handle_mcast(port, &msgbuf); else err = vio_control_pkt_engine(vio, &msgbuf); if (err) break; } else { err = vnet_handle_unknown(port, &msgbuf); } if (err == -ECONNRESET) break; } if (unlikely(tx_wakeup && err != -ECONNRESET)) maybe_tx_wakeup(port); return npkts; } int sunvnet_poll_common(struct napi_struct *napi, int budget) { struct vnet_port *port = container_of(napi, struct vnet_port, napi); struct vio_driver_state *vio = &port->vio; int processed = vnet_event_napi(port, budget); if (processed < budget) { napi_complete_done(napi, processed); port->rx_event &= ~LDC_EVENT_DATA_READY; vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED); } return processed; } EXPORT_SYMBOL_GPL(sunvnet_poll_common); void sunvnet_event_common(void *arg, int event) { struct vnet_port *port = arg; struct vio_driver_state *vio = &port->vio; port->rx_event |= event; vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED); napi_schedule(&port->napi); } EXPORT_SYMBOL_GPL(sunvnet_event_common); static int __vnet_tx_trigger(struct vnet_port *port, u32 start) { struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; struct vio_dring_data hdr = { .tag = { .type = VIO_TYPE_DATA, .stype = VIO_SUBTYPE_INFO, .stype_env = VIO_DRING_DATA, .sid = vio_send_sid(&port->vio), }, .dring_ident = dr->ident, .start_idx = start, .end_idx = (u32)-1, }; int err, delay; int retries = 0; if (port->stop_rx) { trace_vnet_tx_pending_stopped_ack(port->vio._local_sid, port->vio._peer_sid, port->stop_rx_idx, -1); err = vnet_send_ack(port, &port->vio.drings[VIO_DRIVER_RX_RING], port->stop_rx_idx, -1, VIO_DRING_STOPPED); if (err <= 0) return err; } hdr.seq = dr->snd_nxt; delay = 1; do { err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); if (err > 0) { dr->snd_nxt++; 
break; } udelay(delay); if ((delay <<= 1) > 128) delay = 128; if (retries++ > VNET_MAX_RETRIES) break; } while (err == -EAGAIN); trace_vnet_tx_trigger(port->vio._local_sid, port->vio._peer_sid, start, err); return err; } static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, unsigned *pending) { struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; struct sk_buff *skb = NULL; int i, txi; *pending = 0; txi = dr->prod; for (i = 0; i < VNET_TX_RING_SIZE; ++i) { struct vio_net_desc *d; --txi; if (txi < 0) txi = VNET_TX_RING_SIZE - 1; d = vio_dring_entry(dr, txi); if (d->hdr.state == VIO_DESC_READY) { (*pending)++; continue; } if (port->tx_bufs[txi].skb) { if (d->hdr.state != VIO_DESC_DONE) pr_notice("invalid ring buffer state %d\n", d->hdr.state); BUG_ON(port->tx_bufs[txi].skb->next); port->tx_bufs[txi].skb->next = skb; skb = port->tx_bufs[txi].skb; port->tx_bufs[txi].skb = NULL; ldc_unmap(port->vio.lp, port->tx_bufs[txi].cookies, port->tx_bufs[txi].ncookies); } else if (d->hdr.state == VIO_DESC_FREE) { break; } d->hdr.state = VIO_DESC_FREE; } return skb; } static inline void vnet_free_skbs(struct sk_buff *skb) { struct sk_buff *next; while (skb) { next = skb->next; skb->next = NULL; dev_kfree_skb(skb); skb = next; } } void sunvnet_clean_timer_expire_common(struct timer_list *t) { struct vnet_port *port = from_timer(port, t, clean_timer); struct sk_buff *freeskbs; unsigned pending; netif_tx_lock(VNET_PORT_TO_NET_DEVICE(port)); freeskbs = vnet_clean_tx_ring(port, &pending); netif_tx_unlock(VNET_PORT_TO_NET_DEVICE(port)); vnet_free_skbs(freeskbs); if (pending) (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); else del_timer(&port->clean_timer); } EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common); static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb, struct ldc_trans_cookie *cookies, int ncookies, unsigned int map_perm) { int i, nc, err, blen; /* header */ blen = skb_headlen(skb); if (blen < ETH_ZLEN) blen = ETH_ZLEN; blen += VNET_PACKET_SKIP; blen += 8 - (blen & 7); err = ldc_map_single(lp, skb->data - VNET_PACKET_SKIP, blen, cookies, ncookies, map_perm); if (err < 0) return err; nc = err; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *f = &skb_shinfo(skb)->frags[i]; u8 *vaddr; if (nc < ncookies) { vaddr = kmap_local_page(skb_frag_page(f)); blen = skb_frag_size(f); blen += 8 - (blen & 7); err = ldc_map_single(lp, vaddr + skb_frag_off(f), blen, cookies + nc, ncookies - nc, map_perm); kunmap_local(vaddr); } else { err = -EMSGSIZE; } if (err < 0) { ldc_unmap(lp, cookies, nc); return err; } nc += err; } return nc; } static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) { struct sk_buff *nskb; int i, len, pad, docopy; len = skb->len; pad = 0; if (len < ETH_ZLEN) { pad += ETH_ZLEN - skb->len; len += pad; } len += VNET_PACKET_SKIP; pad += 8 - (len & 7); /* make sure we have enough cookies and alignment in every frag */ docopy = skb_shinfo(skb)->nr_frags >= ncookies; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *f = &skb_shinfo(skb)->frags[i]; docopy |= skb_frag_off(f) & 7; } if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP || skb_tailroom(skb) < pad || skb_headroom(skb) < VNET_PACKET_SKIP || docopy) { int start = 0, offset; __wsum csum; len = skb->len > ETH_ZLEN ? 
skb->len : ETH_ZLEN; nskb = alloc_and_align_skb(skb->dev, len); if (!nskb) { dev_kfree_skb(skb); return NULL; } skb_reserve(nskb, VNET_PACKET_SKIP); nskb->protocol = skb->protocol; offset = skb_mac_header(skb) - skb->data; skb_set_mac_header(nskb, offset); offset = skb_network_header(skb) - skb->data; skb_set_network_header(nskb, offset); offset = skb_transport_header(skb) - skb->data; skb_set_transport_header(nskb, offset); offset = 0; nskb->csum_offset = skb->csum_offset; nskb->ip_summed = skb->ip_summed; if (skb->ip_summed == CHECKSUM_PARTIAL) start = skb_checksum_start_offset(skb); if (start) { int offset = start + nskb->csum_offset; /* copy the headers, no csum here */ if (skb_copy_bits(skb, 0, nskb->data, start)) { dev_kfree_skb(nskb); dev_kfree_skb(skb); return NULL; } /* copy the rest, with csum calculation */ *(__sum16 *)(skb->data + offset) = 0; csum = skb_copy_and_csum_bits(skb, start, nskb->data + start, skb->len - start); /* add in the header checksums */ if (skb->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(nskb); if (iph->protocol == IPPROTO_TCP || iph->protocol == IPPROTO_UDP) { csum = csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len - start, iph->protocol, csum); } } else if (skb->protocol == htons(ETH_P_IPV6)) { struct ipv6hdr *ip6h = ipv6_hdr(nskb); if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) { csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb->len - start, ip6h->nexthdr, csum); } } /* save the final result */ *(__sum16 *)(nskb->data + offset) = csum; nskb->ip_summed = CHECKSUM_NONE; } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) { dev_kfree_skb(nskb); dev_kfree_skb(skb); return NULL; } (void)skb_put(nskb, skb->len); if (skb_is_gso(skb)) { skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; } nskb->queue_mapping = skb->queue_mapping; dev_kfree_skb(skb); skb = nskb; } return skb; } static netdev_tx_t vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, struct vnet_port *(*vnet_tx_port) (struct sk_buff *, struct net_device *)) { struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; struct sk_buff *segs, *curr, *next; int maclen, datalen; int status; int gso_size, gso_type, gso_segs; int hlen = skb_transport_header(skb) - skb_mac_header(skb); int proto = IPPROTO_IP; if (skb->protocol == htons(ETH_P_IP)) proto = ip_hdr(skb)->protocol; else if (skb->protocol == htons(ETH_P_IPV6)) proto = ipv6_hdr(skb)->nexthdr; if (proto == IPPROTO_TCP) { hlen += tcp_hdr(skb)->doff * 4; } else if (proto == IPPROTO_UDP) { hlen += sizeof(struct udphdr); } else { pr_err("vnet_handle_offloads GSO with unknown transport " "protocol %d tproto %d\n", skb->protocol, proto); hlen = 128; /* XXX */ } datalen = port->tsolen - hlen; gso_size = skb_shinfo(skb)->gso_size; gso_type = skb_shinfo(skb)->gso_type; gso_segs = skb_shinfo(skb)->gso_segs; if (port->tso && gso_size < datalen) gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen); if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) { struct netdev_queue *txq; txq = netdev_get_tx_queue(dev, port->q_index); netif_tx_stop_queue(txq); if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs) return NETDEV_TX_BUSY; netif_tx_wake_queue(txq); } maclen = skb_network_header(skb) - skb_mac_header(skb); skb_pull(skb, maclen); if (port->tso && gso_size < datalen) { if (skb_unclone(skb, GFP_ATOMIC)) goto out_dropped; /* segment to TSO size */ skb_shinfo(skb)->gso_size = datalen; 
skb_shinfo(skb)->gso_segs = gso_segs; } segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); if (IS_ERR(segs)) goto out_dropped; skb_push(skb, maclen); skb_reset_mac_header(skb); status = 0; skb_list_walk_safe(segs, curr, next) { skb_mark_not_on_list(curr); if (port->tso && curr->len > dev->mtu) { skb_shinfo(curr)->gso_size = gso_size; skb_shinfo(curr)->gso_type = gso_type; skb_shinfo(curr)->gso_segs = DIV_ROUND_UP(curr->len - hlen, gso_size); } else { skb_shinfo(curr)->gso_size = 0; } skb_push(curr, maclen); skb_reset_mac_header(curr); memcpy(skb_mac_header(curr), skb_mac_header(skb), maclen); curr->csum_start = skb_transport_header(curr) - curr->head; if (ip_hdr(curr)->protocol == IPPROTO_TCP) curr->csum_offset = offsetof(struct tcphdr, check); else if (ip_hdr(curr)->protocol == IPPROTO_UDP) curr->csum_offset = offsetof(struct udphdr, check); if (!(status & NETDEV_TX_MASK)) status = sunvnet_start_xmit_common(curr, dev, vnet_tx_port); if (status & NETDEV_TX_MASK) dev_kfree_skb_any(curr); } if (!(status & NETDEV_TX_MASK)) dev_kfree_skb_any(skb); return status; out_dropped: dev->stats.tx_dropped++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; } netdev_tx_t sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, struct vnet_port *(*vnet_tx_port) (struct sk_buff *, struct net_device *)) { struct vnet_port *port = NULL; struct vio_dring_state *dr; struct vio_net_desc *d; unsigned int len; struct sk_buff *freeskbs = NULL; int i, err, txi; unsigned pending = 0; struct netdev_queue *txq; rcu_read_lock(); port = vnet_tx_port(skb, dev); if (unlikely(!port)) goto out_dropped; if (skb_is_gso(skb) && skb->len > port->tsolen) { err = vnet_handle_offloads(port, skb, vnet_tx_port); rcu_read_unlock(); return err; } if (!skb_is_gso(skb) && skb->len > port->rmtu) { unsigned long localmtu = port->rmtu - ETH_HLEN; if (vio_version_after_eq(&port->vio, 1, 3)) localmtu -= VLAN_HLEN; if (skb->protocol == htons(ETH_P_IP)) icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(localmtu)); #if IS_ENABLED(CONFIG_IPV6) else if (skb->protocol == htons(ETH_P_IPV6)) icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); #endif goto out_dropped; } skb = vnet_skb_shape(skb, 2); if (unlikely(!skb)) goto out_dropped; if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb->protocol == htons(ETH_P_IP)) vnet_fullcsum_ipv4(skb); #if IS_ENABLED(CONFIG_IPV6) else if (skb->protocol == htons(ETH_P_IPV6)) vnet_fullcsum_ipv6(skb); #endif } dr = &port->vio.drings[VIO_DRIVER_TX_RING]; i = skb_get_queue_mapping(skb); txq = netdev_get_tx_queue(dev, i); if (unlikely(vnet_tx_dring_avail(dr) < 1)) { if (!netif_tx_queue_stopped(txq)) { netif_tx_stop_queue(txq); /* This is a hard error, log it. */ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); dev->stats.tx_errors++; } rcu_read_unlock(); return NETDEV_TX_BUSY; } d = vio_dring_cur(dr); txi = dr->prod; freeskbs = vnet_clean_tx_ring(port, &pending); BUG_ON(port->tx_bufs[txi].skb); len = skb->len; if (len < ETH_ZLEN) len = ETH_ZLEN; err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2, (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); if (err < 0) { netdev_info(dev, "tx buffer map error %d\n", err); goto out_dropped; } port->tx_bufs[txi].skb = skb; skb = NULL; port->tx_bufs[txi].ncookies = err; /* We don't rely on the ACKs to free the skb in vnet_start_xmit(), * thus it is safe to not set VIO_ACK_ENABLE for each transmission: * the protocol itself does not require it as long as the peer * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED. 
* * An ACK for every packet in the ring is expensive as the * sending of LDC messages is slow and affects performance. */ d->hdr.ack = VIO_ACK_DISABLE; d->size = len; d->ncookies = port->tx_bufs[txi].ncookies; for (i = 0; i < d->ncookies; i++) d->cookies[i] = port->tx_bufs[txi].cookies[i]; if (vio_version_after_eq(&port->vio, 1, 7)) { struct vio_net_dext *dext = vio_net_ext(d); memset(dext, 0, sizeof(*dext)); if (skb_is_gso(port->tx_bufs[txi].skb)) { dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb) ->gso_size; dext->flags |= VNET_PKT_IPV4_LSO; } if (vio_version_after_eq(&port->vio, 1, 8) && !port->switch_port) { dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK; dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK; } } /* This has to be a non-SMP write barrier because we are writing * to memory which is shared with the peer LDOM. */ dma_wmb(); d->hdr.state = VIO_DESC_READY; /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent * to notify the consumer that some descriptors are READY. * After that "start" trigger, no additional triggers are needed until * a DRING_STOPPED is received from the consumer. The dr->cons field * (set up by vnet_ack()) has the value of the next dring index * that has not yet been ack-ed. We send a "start" trigger here * if, and only if, start_cons is true (reset it afterward). Conversely, * vnet_ack() should check if the dring corresponding to cons * is marked READY, but start_cons was false. * If so, vnet_ack() should send out the missed "start" trigger. * * Note that the dma_wmb() above makes sure the cookies et al. are * not globally visible before the VIO_DESC_READY, and that the * stores are ordered correctly by the compiler. The consumer will * not proceed until the VIO_DESC_READY is visible assuring that * the consumer does not observe anything related to descriptors * out of order. 
The HV trap from the LDC start trigger is the * producer to consumer announcement that work is available to the * consumer */ if (!port->start_cons) { /* previous trigger suffices */ trace_vnet_skip_tx_trigger(port->vio._local_sid, port->vio._peer_sid, dr->cons); goto ldc_start_done; } err = __vnet_tx_trigger(port, dr->cons); if (unlikely(err < 0)) { netdev_info(dev, "TX trigger error %d\n", err); d->hdr.state = VIO_DESC_FREE; skb = port->tx_bufs[txi].skb; port->tx_bufs[txi].skb = NULL; dev->stats.tx_carrier_errors++; goto out_dropped; } ldc_start_done: port->start_cons = false; dev->stats.tx_packets++; dev->stats.tx_bytes += port->tx_bufs[txi].skb->len; port->stats.tx_packets++; port->stats.tx_bytes += port->tx_bufs[txi].skb->len; dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); if (unlikely(vnet_tx_dring_avail(dr) < 1)) { netif_tx_stop_queue(txq); smp_rmb(); if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) netif_tx_wake_queue(txq); } (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); rcu_read_unlock(); vnet_free_skbs(freeskbs); return NETDEV_TX_OK; out_dropped: if (pending) (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); else if (port) del_timer(&port->clean_timer); rcu_read_unlock(); dev_kfree_skb(skb); vnet_free_skbs(freeskbs); dev->stats.tx_dropped++; return NETDEV_TX_OK; } EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common); void sunvnet_tx_timeout_common(struct net_device *dev, unsigned int txqueue) { /* XXX Implement me XXX */ } EXPORT_SYMBOL_GPL(sunvnet_tx_timeout_common); int sunvnet_open_common(struct net_device *dev) { netif_carrier_on(dev); netif_tx_start_all_queues(dev); return 0; } EXPORT_SYMBOL_GPL(sunvnet_open_common); int sunvnet_close_common(struct net_device *dev) { netif_tx_stop_all_queues(dev); netif_carrier_off(dev); return 0; } EXPORT_SYMBOL_GPL(sunvnet_close_common); static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr) { struct vnet_mcast_entry *m; for (m = vp->mcast_list; m; m = m->next) { if (ether_addr_equal(m->addr, addr)) return m; } return NULL; } static void __update_mc_list(struct vnet *vp, struct net_device *dev) { struct netdev_hw_addr *ha; netdev_for_each_mc_addr(ha, dev) { struct vnet_mcast_entry *m; m = __vnet_mc_find(vp, ha->addr); if (m) { m->hit = 1; continue; } if (!m) { m = kzalloc(sizeof(*m), GFP_ATOMIC); if (!m) continue; memcpy(m->addr, ha->addr, ETH_ALEN); m->hit = 1; m->next = vp->mcast_list; vp->mcast_list = m; } } } static void __send_mc_list(struct vnet *vp, struct vnet_port *port) { struct vio_net_mcast_info info; struct vnet_mcast_entry *m, **pp; int n_addrs; memset(&info, 0, sizeof(info)); info.tag.type = VIO_TYPE_CTRL; info.tag.stype = VIO_SUBTYPE_INFO; info.tag.stype_env = VNET_MCAST_INFO; info.tag.sid = vio_send_sid(&port->vio); info.set = 1; n_addrs = 0; for (m = vp->mcast_list; m; m = m->next) { if (m->sent) continue; m->sent = 1; memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], m->addr, ETH_ALEN); if (++n_addrs == VNET_NUM_MCAST) { info.count = n_addrs; (void)vio_ldc_send(&port->vio, &info, sizeof(info)); n_addrs = 0; } } if (n_addrs) { info.count = n_addrs; (void)vio_ldc_send(&port->vio, &info, sizeof(info)); } info.set = 0; n_addrs = 0; pp = &vp->mcast_list; while ((m = *pp) != NULL) { if (m->hit) { m->hit = 0; pp = &m->next; continue; } memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], m->addr, ETH_ALEN); if (++n_addrs == VNET_NUM_MCAST) { info.count = n_addrs; (void)vio_ldc_send(&port->vio, &info, sizeof(info)); n_addrs = 0; } *pp = m->next; kfree(m); } if (n_addrs) { 
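/* Note (added comment): flush the final partial batch of addresses being removed; info.set is 0 at this point. */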
info.count = n_addrs; (void)vio_ldc_send(&port->vio, &info, sizeof(info)); } } void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp) { struct vnet_port *port; rcu_read_lock(); list_for_each_entry_rcu(port, &vp->port_list, list) { if (port->switch_port) { __update_mc_list(vp, dev); __send_mc_list(vp, port); break; } } rcu_read_unlock(); } EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common); int sunvnet_set_mac_addr_common(struct net_device *dev, void *p) { return -EINVAL; } EXPORT_SYMBOL_GPL(sunvnet_set_mac_addr_common); void sunvnet_port_free_tx_bufs_common(struct vnet_port *port) { struct vio_dring_state *dr; int i; dr = &port->vio.drings[VIO_DRIVER_TX_RING]; if (!dr->base) return; for (i = 0; i < VNET_TX_RING_SIZE; i++) { struct vio_net_desc *d; void *skb = port->tx_bufs[i].skb; if (!skb) continue; d = vio_dring_entry(dr, i); ldc_unmap(port->vio.lp, port->tx_bufs[i].cookies, port->tx_bufs[i].ncookies); dev_kfree_skb(skb); port->tx_bufs[i].skb = NULL; d->hdr.state = VIO_DESC_FREE; } ldc_free_exp_dring(port->vio.lp, dr->base, (dr->entry_size * dr->num_entries), dr->cookies, dr->ncookies); dr->base = NULL; dr->entry_size = 0; dr->num_entries = 0; dr->pending = 0; dr->ncookies = 0; } EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common); void vnet_port_reset(struct vnet_port *port) { del_timer(&port->clean_timer); sunvnet_port_free_tx_bufs_common(port); port->rmtu = 0; port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */ port->tsolen = 0; } EXPORT_SYMBOL_GPL(vnet_port_reset); static int vnet_port_alloc_tx_ring(struct vnet_port *port) { struct vio_dring_state *dr; unsigned long len, elen; int i, err, ncookies; void *dring; dr = &port->vio.drings[VIO_DRIVER_TX_RING]; elen = sizeof(struct vio_net_desc) + sizeof(struct ldc_trans_cookie) * 2; if (vio_version_after_eq(&port->vio, 1, 7)) elen += sizeof(struct vio_net_dext); len = VNET_TX_RING_SIZE * elen; ncookies = VIO_MAX_RING_COOKIES; dring = ldc_alloc_exp_dring(port->vio.lp, len, dr->cookies, &ncookies, (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); if (IS_ERR(dring)) { err = PTR_ERR(dring); goto err_out; } dr->base = dring; dr->entry_size = elen; dr->num_entries = VNET_TX_RING_SIZE; dr->prod = 0; dr->cons = 0; port->start_cons = true; /* need an initial trigger */ dr->pending = VNET_TX_RING_SIZE; dr->ncookies = ncookies; for (i = 0; i < VNET_TX_RING_SIZE; ++i) { struct vio_net_desc *d; d = vio_dring_entry(dr, i); d->hdr.state = VIO_DESC_FREE; } return 0; err_out: sunvnet_port_free_tx_bufs_common(port); return err; } #ifdef CONFIG_NET_POLL_CONTROLLER void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp) { struct vnet_port *port; unsigned long flags; spin_lock_irqsave(&vp->lock, flags); if (!list_empty(&vp->port_list)) { port = list_entry(vp->port_list.next, struct vnet_port, list); napi_schedule(&port->napi); } spin_unlock_irqrestore(&vp->lock, flags); } EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common); #endif void sunvnet_port_add_txq_common(struct vnet_port *port) { struct vnet *vp = port->vp; int smallest = 0; int i; /* find the first least-used q * When there are more ldoms than q's, we start to * double up on ports per queue. 
*/ for (i = 0; i < VNET_MAX_TXQS; i++) { if (vp->q_used[i] == 0) { smallest = i; break; } if (vp->q_used[i] < vp->q_used[smallest]) smallest = i; } vp->nports++; vp->q_used[smallest]++; port->q_index = smallest; } EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common); void sunvnet_port_rm_txq_common(struct vnet_port *port) { port->vp->nports--; port->vp->q_used[port->q_index]--; port->q_index = 0; } EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common);
linux-master
drivers/net/ethernet/sun/sunvnet_common.c
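/*
 * Illustrative sketch, not part of the kernel sources above or below: a
 * user-space rendering of the "first least-used TX queue" pick that
 * sunvnet_port_add_txq_common() performs over vp->q_used[]. MAX_TXQS and
 * q_used[] here are local stand-ins for the driver's VNET_MAX_TXQS and
 * vp->q_used[]; the selection logic is the same, only the surrounding
 * bookkeeping (nports, q_used increments) is omitted.
 */
#include <stdio.h>

#define MAX_TXQS 16

static int pick_least_used(const unsigned int q_used[MAX_TXQS])
{
	int smallest = 0;
	int i;

	for (i = 0; i < MAX_TXQS; i++) {
		if (q_used[i] == 0)
			return i;		/* an unused queue wins immediately */
		if (q_used[i] < q_used[smallest])
			smallest = i;		/* otherwise track the least-loaded one */
	}
	return smallest;
}

int main(void)
{
	unsigned int q_used[MAX_TXQS] = { 2, 1, 3, 1 };	/* remaining entries are 0 */

	/* Queue 4 is the first entry still at zero, so it is selected. */
	printf("picked txq %d\n", pick_least_used(q_used));
	return 0;
}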
// SPDX-License-Identifier: GPL-2.0 /* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $ * sungem.c: Sun GEM ethernet driver. * * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller ([email protected]) * * Support for Apple GMAC and assorted PHYs, WOL, Power Management * (C) 2001,2002,2003 Benjamin Herrenscmidt ([email protected]) * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp. * * NAPI and NETPOLL support * (C) 2004 by Eric Lemoine ([email protected]) * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/random.h> #include <linux/workqueue.h> #include <linux/if_vlan.h> #include <linux/bitops.h> #include <linux/mm.h> #include <linux/gfp.h> #include <linux/of.h> #include <asm/io.h> #include <asm/byteorder.h> #include <linux/uaccess.h> #include <asm/irq.h> #ifdef CONFIG_SPARC #include <asm/idprom.h> #include <asm/prom.h> #endif #ifdef CONFIG_PPC_PMAC #include <asm/machdep.h> #include <asm/pmac_feature.h> #endif #include <linux/sungem_phy.h> #include "sungem.h" #define STRIP_FCS #define DEFAULT_MSG (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ NETIF_MSG_LINK) #define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \ SUPPORTED_Pause | SUPPORTED_Autoneg) #define DRV_NAME "sungem" #define DRV_VERSION "1.0" #define DRV_AUTHOR "David S. Miller <[email protected]>" static char version[] = DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n"; MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver"); MODULE_LICENSE("GPL"); #define GEM_MODULE_NAME "gem" static const struct pci_device_id gem_pci_tbl[] = { { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* These models only differ from the original GEM in * that their tx/rx fifos are of a different size and * they only support 10/100 speeds. -DaveM * * Apple's GMAC does support gigabit on machines with * the BCM54xx PHYs. 
-BenH */ { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, {0, } }; MODULE_DEVICE_TABLE(pci, gem_pci_tbl); static u16 __sungem_phy_read(struct gem *gp, int phy_addr, int reg) { u32 cmd; int limit = 10000; cmd = (1 << 30); cmd |= (2 << 28); cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; cmd |= (reg << 18) & MIF_FRAME_REGAD; cmd |= (MIF_FRAME_TAMSB); writel(cmd, gp->regs + MIF_FRAME); while (--limit) { cmd = readl(gp->regs + MIF_FRAME); if (cmd & MIF_FRAME_TALSB) break; udelay(10); } if (!limit) cmd = 0xffff; return cmd & MIF_FRAME_DATA; } static inline int _sungem_phy_read(struct net_device *dev, int mii_id, int reg) { struct gem *gp = netdev_priv(dev); return __sungem_phy_read(gp, mii_id, reg); } static inline u16 sungem_phy_read(struct gem *gp, int reg) { return __sungem_phy_read(gp, gp->mii_phy_addr, reg); } static void __sungem_phy_write(struct gem *gp, int phy_addr, int reg, u16 val) { u32 cmd; int limit = 10000; cmd = (1 << 30); cmd |= (1 << 28); cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; cmd |= (reg << 18) & MIF_FRAME_REGAD; cmd |= (MIF_FRAME_TAMSB); cmd |= (val & MIF_FRAME_DATA); writel(cmd, gp->regs + MIF_FRAME); while (limit--) { cmd = readl(gp->regs + MIF_FRAME); if (cmd & MIF_FRAME_TALSB) break; udelay(10); } } static inline void _sungem_phy_write(struct net_device *dev, int mii_id, int reg, int val) { struct gem *gp = netdev_priv(dev); __sungem_phy_write(gp, mii_id, reg, val & 0xffff); } static inline void sungem_phy_write(struct gem *gp, int reg, u16 val) { __sungem_phy_write(gp, gp->mii_phy_addr, reg, val); } static inline void gem_enable_ints(struct gem *gp) { /* Enable all interrupts but TXDONE */ writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK); } static inline void gem_disable_ints(struct gem *gp) { /* Disable all interrupts, including TXDONE */ writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK); (void)readl(gp->regs + GREG_IMASK); /* write posting */ } static void gem_get_cell(struct gem *gp) { BUG_ON(gp->cell_enabled < 0); gp->cell_enabled++; #ifdef CONFIG_PPC_PMAC if (gp->cell_enabled == 1) { mb(); pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1); udelay(10); } #endif /* CONFIG_PPC_PMAC */ } /* Turn off the chip's clock */ static void gem_put_cell(struct gem *gp) { BUG_ON(gp->cell_enabled <= 0); gp->cell_enabled--; #ifdef CONFIG_PPC_PMAC if (gp->cell_enabled == 0) { mb(); pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0); udelay(10); } #endif /* CONFIG_PPC_PMAC */ } static inline void gem_netif_stop(struct gem *gp) { netif_trans_update(gp->dev); /* prevent tx timeout */ napi_disable(&gp->napi); netif_tx_disable(gp->dev); } static inline void gem_netif_start(struct gem *gp) { /* NOTE: unconditional netif_wake_queue is only * appropriate so long as all callers are assured to * have free tx slots. 
*/ netif_wake_queue(gp->dev); napi_enable(&gp->napi); } static void gem_schedule_reset(struct gem *gp) { gp->reset_task_pending = 1; schedule_work(&gp->reset_task); } static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits) { if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name); } static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 pcs_istat = readl(gp->regs + PCS_ISTAT); u32 pcs_miistat; if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n", gp->dev->name, pcs_istat); if (!(pcs_istat & PCS_ISTAT_LSC)) { netdev_err(dev, "PCS irq but no link status change???\n"); return 0; } /* The link status bit latches on zero, so you must * read it twice in such a case to see a transition * to the link being up. */ pcs_miistat = readl(gp->regs + PCS_MIISTAT); if (!(pcs_miistat & PCS_MIISTAT_LS)) pcs_miistat |= (readl(gp->regs + PCS_MIISTAT) & PCS_MIISTAT_LS); if (pcs_miistat & PCS_MIISTAT_ANC) { /* The remote-fault indication is only valid * when autoneg has completed. */ if (pcs_miistat & PCS_MIISTAT_RF) netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n"); else netdev_info(dev, "PCS AutoNEG complete\n"); } if (pcs_miistat & PCS_MIISTAT_LS) { netdev_info(dev, "PCS link is now up\n"); netif_carrier_on(gp->dev); } else { netdev_info(dev, "PCS link is now down\n"); netif_carrier_off(gp->dev); /* If this happens and the link timer is not running, * reset so we re-negotiate. */ if (!timer_pending(&gp->link_timer)) return 1; } return 0; } static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 txmac_stat = readl(gp->regs + MAC_TXSTAT); if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n", gp->dev->name, txmac_stat); /* Defer timer expiration is quite normal, * don't even log the event. */ if ((txmac_stat & MAC_TXSTAT_DTE) && !(txmac_stat & ~MAC_TXSTAT_DTE)) return 0; if (txmac_stat & MAC_TXSTAT_URUN) { netdev_err(dev, "TX MAC xmit underrun\n"); dev->stats.tx_fifo_errors++; } if (txmac_stat & MAC_TXSTAT_MPE) { netdev_err(dev, "TX MAC max packet size error\n"); dev->stats.tx_errors++; } /* The rest are all cases of one of the 16-bit TX * counters expiring. */ if (txmac_stat & MAC_TXSTAT_NCE) dev->stats.collisions += 0x10000; if (txmac_stat & MAC_TXSTAT_ECE) { dev->stats.tx_aborted_errors += 0x10000; dev->stats.collisions += 0x10000; } if (txmac_stat & MAC_TXSTAT_LCE) { dev->stats.tx_aborted_errors += 0x10000; dev->stats.collisions += 0x10000; } /* We do not keep track of MAC_TXSTAT_FCE and * MAC_TXSTAT_PCE events. */ return 0; } /* When we get a RX fifo overflow, the RX unit in GEM is probably hung * so we do the following. * * If any part of the reset goes wrong, we return 1 and that causes the * whole chip to be reset. */ static int gem_rxmac_reset(struct gem *gp) { struct net_device *dev = gp->dev; int limit, i; u64 desc_dma; u32 val; /* First, reset & disable MAC RX. 
*/ writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); for (limit = 0; limit < 5000; limit++) { if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD)) break; udelay(10); } if (limit == 5000) { netdev_err(dev, "RX MAC will not reset, resetting whole chip\n"); return 1; } writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); for (limit = 0; limit < 5000; limit++) { if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB)) break; udelay(10); } if (limit == 5000) { netdev_err(dev, "RX MAC will not disable, resetting whole chip\n"); return 1; } /* Second, disable RX DMA. */ writel(0, gp->regs + RXDMA_CFG); for (limit = 0; limit < 5000; limit++) { if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE)) break; udelay(10); } if (limit == 5000) { netdev_err(dev, "RX DMA will not disable, resetting whole chip\n"); return 1; } mdelay(5); /* Execute RX reset command. */ writel(gp->swrst_base | GREG_SWRST_RXRST, gp->regs + GREG_SWRST); for (limit = 0; limit < 5000; limit++) { if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST)) break; udelay(10); } if (limit == 5000) { netdev_err(dev, "RX reset command will not execute, resetting whole chip\n"); return 1; } /* Refresh the RX ring. */ for (i = 0; i < RX_RING_SIZE; i++) { struct gem_rxd *rxd = &gp->init_block->rxd[i]; if (gp->rx_skbs[i] == NULL) { netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n"); return 1; } rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); } gp->rx_new = gp->rx_old = 0; /* Now we must reprogram the rest of RX unit. */ desc_dma = (u64) gp->gblock_dvma; desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128); writel(val, gp->regs + RXDMA_CFG); if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) writel(((5 & RXDMA_BLANK_IPKTS) | ((8 << 12) & RXDMA_BLANK_ITIME)), gp->regs + RXDMA_BLANK); else writel(((5 & RXDMA_BLANK_IPKTS) | ((4 << 12) & RXDMA_BLANK_ITIME)), gp->regs + RXDMA_BLANK); val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); writel(val, gp->regs + RXDMA_PTHRESH); val = readl(gp->regs + RXDMA_CFG); writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); val = readl(gp->regs + MAC_RXCFG); writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); return 0; } static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT); int ret = 0; if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n", gp->dev->name, rxmac_stat); if (rxmac_stat & MAC_RXSTAT_OFLW) { u32 smac = readl(gp->regs + MAC_SMACHINE); netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac); dev->stats.rx_over_errors++; dev->stats.rx_fifo_errors++; ret = gem_rxmac_reset(gp); } if (rxmac_stat & MAC_RXSTAT_ACE) dev->stats.rx_frame_errors += 0x10000; if (rxmac_stat & MAC_RXSTAT_CCE) dev->stats.rx_crc_errors += 0x10000; if (rxmac_stat & MAC_RXSTAT_LCE) dev->stats.rx_length_errors += 0x10000; /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE * events. 
*/ return ret; } static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 mac_cstat = readl(gp->regs + MAC_CSTAT); if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n", gp->dev->name, mac_cstat); /* This interrupt is just for pause frame and pause * tracking. It is useful for diagnostics and debug * but probably by default we will mask these events. */ if (mac_cstat & MAC_CSTAT_PS) gp->pause_entered++; if (mac_cstat & MAC_CSTAT_PRCV) gp->pause_last_time_recvd = (mac_cstat >> 16); return 0; } static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 mif_status = readl(gp->regs + MIF_STATUS); u32 reg_val, changed_bits; reg_val = (mif_status & MIF_STATUS_DATA) >> 16; changed_bits = (mif_status & MIF_STATUS_STAT); gem_handle_mif_event(gp, reg_val, changed_bits); return 0; } static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 pci_estat = readl(gp->regs + GREG_PCIESTAT); if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { netdev_err(dev, "PCI error [%04x]", pci_estat); if (pci_estat & GREG_PCIESTAT_BADACK) pr_cont(" <No ACK64# during ABS64 cycle>"); if (pci_estat & GREG_PCIESTAT_DTRTO) pr_cont(" <Delayed transaction timeout>"); if (pci_estat & GREG_PCIESTAT_OTHER) pr_cont(" <other>"); pr_cont("\n"); } else { pci_estat |= GREG_PCIESTAT_OTHER; netdev_err(dev, "PCI error\n"); } if (pci_estat & GREG_PCIESTAT_OTHER) { int pci_errs; /* Interrogate PCI config space for the * true cause. */ pci_errs = pci_status_get_and_clear_errors(gp->pdev); netdev_err(dev, "PCI status errors[%04x]\n", pci_errs); if (pci_errs & PCI_STATUS_PARITY) netdev_err(dev, "PCI parity error detected\n"); if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT) netdev_err(dev, "PCI target abort\n"); if (pci_errs & PCI_STATUS_REC_TARGET_ABORT) netdev_err(dev, "PCI master acks target abort\n"); if (pci_errs & PCI_STATUS_REC_MASTER_ABORT) netdev_err(dev, "PCI master abort\n"); if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR) netdev_err(dev, "PCI system error SERR#\n"); if (pci_errs & PCI_STATUS_DETECTED_PARITY) netdev_err(dev, "PCI parity error\n"); } /* For all PCI errors, we should reset the chip. */ return 1; } /* All non-normal interrupt conditions get serviced here. * Returns non-zero if we should just exit the interrupt * handler right now (ie. if we reset the card which invalidates * all of the other original irq status bits). */ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status) { if (gem_status & GREG_STAT_RXNOBUF) { /* Frame arrived, no free RX buffers available. 
*/ if (netif_msg_rx_err(gp)) printk(KERN_DEBUG "%s: no buffer for rx frame\n", gp->dev->name); dev->stats.rx_dropped++; } if (gem_status & GREG_STAT_RXTAGERR) { /* corrupt RX tag framing */ if (netif_msg_rx_err(gp)) printk(KERN_DEBUG "%s: corrupt rx tag framing\n", gp->dev->name); dev->stats.rx_errors++; return 1; } if (gem_status & GREG_STAT_PCS) { if (gem_pcs_interrupt(dev, gp, gem_status)) return 1; } if (gem_status & GREG_STAT_TXMAC) { if (gem_txmac_interrupt(dev, gp, gem_status)) return 1; } if (gem_status & GREG_STAT_RXMAC) { if (gem_rxmac_interrupt(dev, gp, gem_status)) return 1; } if (gem_status & GREG_STAT_MAC) { if (gem_mac_interrupt(dev, gp, gem_status)) return 1; } if (gem_status & GREG_STAT_MIF) { if (gem_mif_interrupt(dev, gp, gem_status)) return 1; } if (gem_status & GREG_STAT_PCIERR) { if (gem_pci_interrupt(dev, gp, gem_status)) return 1; } return 0; } static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status) { int entry, limit; entry = gp->tx_old; limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT); while (entry != limit) { struct sk_buff *skb; struct gem_txd *txd; dma_addr_t dma_addr; u32 dma_len; int frag; if (netif_msg_tx_done(gp)) printk(KERN_DEBUG "%s: tx done, slot %d\n", gp->dev->name, entry); skb = gp->tx_skbs[entry]; if (skb_shinfo(skb)->nr_frags) { int last = entry + skb_shinfo(skb)->nr_frags; int walk = entry; int incomplete = 0; last &= (TX_RING_SIZE - 1); for (;;) { walk = NEXT_TX(walk); if (walk == limit) incomplete = 1; if (walk == last) break; } if (incomplete) break; } gp->tx_skbs[entry] = NULL; dev->stats.tx_bytes += skb->len; for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { txd = &gp->init_block->txd[entry]; dma_addr = le64_to_cpu(txd->buffer); dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ; dma_unmap_page(&gp->pdev->dev, dma_addr, dma_len, DMA_TO_DEVICE); entry = NEXT_TX(entry); } dev->stats.tx_packets++; dev_consume_skb_any(skb); } gp->tx_old = entry; /* Need to make the tx_old update visible to gem_start_xmit() * before checking for netif_queue_stopped(). Without the * memory barrier, there is a small possibility that gem_start_xmit() * will miss it and cause the queue to be stopped forever. 
*/ smp_mb(); if (unlikely(netif_queue_stopped(dev) && TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) { struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); __netif_tx_lock(txq, smp_processor_id()); if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) netif_wake_queue(dev); __netif_tx_unlock(txq); } } static __inline__ void gem_post_rxds(struct gem *gp, int limit) { int cluster_start, curr, count, kick; cluster_start = curr = (gp->rx_new & ~(4 - 1)); count = 0; kick = -1; dma_wmb(); while (curr != limit) { curr = NEXT_RX(curr); if (++count == 4) { struct gem_rxd *rxd = &gp->init_block->rxd[cluster_start]; for (;;) { rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); rxd++; cluster_start = NEXT_RX(cluster_start); if (cluster_start == curr) break; } kick = curr; count = 0; } } if (kick >= 0) { mb(); writel(kick, gp->regs + RXDMA_KICK); } } #define ALIGNED_RX_SKB_ADDR(addr) \ ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size, gfp_t gfp_flags) { struct sk_buff *skb = alloc_skb(size + 64, gfp_flags); if (likely(skb)) { unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data); skb_reserve(skb, offset); } return skb; } static int gem_rx(struct gem *gp, int work_to_do) { struct net_device *dev = gp->dev; int entry, drops, work_done = 0; u32 done; if (netif_msg_rx_status(gp)) printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n", gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new); entry = gp->rx_new; drops = 0; done = readl(gp->regs + RXDMA_DONE); for (;;) { struct gem_rxd *rxd = &gp->init_block->rxd[entry]; struct sk_buff *skb; u64 status = le64_to_cpu(rxd->status_word); dma_addr_t dma_addr; int len; if ((status & RXDCTRL_OWN) != 0) break; if (work_done >= RX_RING_SIZE || work_done >= work_to_do) break; /* When writing back RX descriptor, GEM writes status * then buffer address, possibly in separate transactions. * If we don't wait for the chip to write both, we could * post a new buffer to this descriptor then have GEM spam * on the buffer address. We sync on the RX completion * register to prevent this from happening. */ if (entry == done) { done = readl(gp->regs + RXDMA_DONE); if (entry == done) break; } /* We can now account for the work we're about to do */ work_done++; skb = gp->rx_skbs[entry]; len = (status & RXDCTRL_BUFSZ) >> 16; if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { dev->stats.rx_errors++; if (len < ETH_ZLEN) dev->stats.rx_length_errors++; if (len & RXDCTRL_BAD) dev->stats.rx_crc_errors++; /* We'll just return it to GEM. */ drop_it: dev->stats.rx_dropped++; goto next; } dma_addr = le64_to_cpu(rxd->buffer); if (len > RX_COPY_THRESHOLD) { struct sk_buff *new_skb; new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); if (new_skb == NULL) { drops++; goto drop_it; } dma_unmap_page(&gp->pdev->dev, dma_addr, RX_BUF_ALLOC_SIZE(gp), DMA_FROM_DEVICE); gp->rx_skbs[entry] = new_skb; skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET)); rxd->buffer = cpu_to_le64(dma_map_page(&gp->pdev->dev, virt_to_page(new_skb->data), offset_in_page(new_skb->data), RX_BUF_ALLOC_SIZE(gp), DMA_FROM_DEVICE)); skb_reserve(new_skb, RX_OFFSET); /* Trim the original skb for the netif. 
*/ skb_trim(skb, len); } else { struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2); if (copy_skb == NULL) { drops++; goto drop_it; } skb_reserve(copy_skb, 2); skb_put(copy_skb, len); dma_sync_single_for_cpu(&gp->pdev->dev, dma_addr, len, DMA_FROM_DEVICE); skb_copy_from_linear_data(skb, copy_skb->data, len); dma_sync_single_for_device(&gp->pdev->dev, dma_addr, len, DMA_FROM_DEVICE); /* We'll reuse the original ring buffer. */ skb = copy_skb; } if (likely(dev->features & NETIF_F_RXCSUM)) { __sum16 csum; csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff); skb->csum = csum_unfold(csum); skb->ip_summed = CHECKSUM_COMPLETE; } skb->protocol = eth_type_trans(skb, gp->dev); napi_gro_receive(&gp->napi, skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; next: entry = NEXT_RX(entry); } gem_post_rxds(gp, entry); gp->rx_new = entry; if (drops) netdev_info(gp->dev, "Memory squeeze, deferring packet\n"); return work_done; } static int gem_poll(struct napi_struct *napi, int budget) { struct gem *gp = container_of(napi, struct gem, napi); struct net_device *dev = gp->dev; int work_done; work_done = 0; do { /* Handle anomalies */ if (unlikely(gp->status & GREG_STAT_ABNORMAL)) { struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); int reset; /* We run the abnormal interrupt handling code with * the Tx lock. It only resets the Rx portion of the * chip, but we need to guard it against DMA being * restarted by the link poll timer */ __netif_tx_lock(txq, smp_processor_id()); reset = gem_abnormal_irq(dev, gp, gp->status); __netif_tx_unlock(txq); if (reset) { gem_schedule_reset(gp); napi_complete(napi); return work_done; } } /* Run TX completion thread */ gem_tx(dev, gp, gp->status); /* Run RX thread. We don't use any locking here, * code willing to do bad things - like cleaning the * rx ring - must call napi_disable(), which * schedule_timeout()'s if polling is already disabled. */ work_done += gem_rx(gp, budget - work_done); if (work_done >= budget) return work_done; gp->status = readl(gp->regs + GREG_STAT); } while (gp->status & GREG_STAT_NAPI); napi_complete_done(napi, work_done); gem_enable_ints(gp); return work_done; } static irqreturn_t gem_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct gem *gp = netdev_priv(dev); if (napi_schedule_prep(&gp->napi)) { u32 gem_status = readl(gp->regs + GREG_STAT); if (unlikely(gem_status == 0)) { napi_enable(&gp->napi); return IRQ_NONE; } if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n", gp->dev->name, gem_status); gp->status = gem_status; gem_disable_ints(gp); __napi_schedule(&gp->napi); } /* If polling was disabled at the time we received that * interrupt, we may return IRQ_HANDLED here while we * should return IRQ_NONE. No big deal... 
*/ return IRQ_HANDLED; } #ifdef CONFIG_NET_POLL_CONTROLLER static void gem_poll_controller(struct net_device *dev) { struct gem *gp = netdev_priv(dev); disable_irq(gp->pdev->irq); gem_interrupt(gp->pdev->irq, dev); enable_irq(gp->pdev->irq); } #endif static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct gem *gp = netdev_priv(dev); netdev_err(dev, "transmit timed out, resetting\n"); netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n", readl(gp->regs + TXDMA_CFG), readl(gp->regs + MAC_TXSTAT), readl(gp->regs + MAC_TXCFG)); netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n", readl(gp->regs + RXDMA_CFG), readl(gp->regs + MAC_RXSTAT), readl(gp->regs + MAC_RXCFG)); gem_schedule_reset(gp); } static __inline__ int gem_intme(int entry) { /* Algorithm: IRQ every 1/2 of descriptors. */ if (!(entry & ((TX_RING_SIZE>>1)-1))) return 1; return 0; } static netdev_tx_t gem_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct gem *gp = netdev_priv(dev); int entry; u64 ctrl; ctrl = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { const u64 csum_start_off = skb_checksum_start_offset(skb); const u64 csum_stuff_off = csum_start_off + skb->csum_offset; ctrl = (TXDCTRL_CENAB | (csum_start_off << 15) | (csum_stuff_off << 21)); } if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) { /* This is a hard error, log it. */ if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); } return NETDEV_TX_BUSY; } entry = gp->tx_new; gp->tx_skbs[entry] = skb; if (skb_shinfo(skb)->nr_frags == 0) { struct gem_txd *txd = &gp->init_block->txd[entry]; dma_addr_t mapping; u32 len; len = skb->len; mapping = dma_map_page(&gp->pdev->dev, virt_to_page(skb->data), offset_in_page(skb->data), len, DMA_TO_DEVICE); ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len; if (gem_intme(entry)) ctrl |= TXDCTRL_INTME; txd->buffer = cpu_to_le64(mapping); dma_wmb(); txd->control_word = cpu_to_le64(ctrl); entry = NEXT_TX(entry); } else { struct gem_txd *txd; u32 first_len; u64 intme; dma_addr_t first_mapping; int frag, first_entry = entry; intme = 0; if (gem_intme(entry)) intme |= TXDCTRL_INTME; /* We must give this initial chunk to the device last. * Otherwise we could race with the device. */ first_len = skb_headlen(skb); first_mapping = dma_map_page(&gp->pdev->dev, virt_to_page(skb->data), offset_in_page(skb->data), first_len, DMA_TO_DEVICE); entry = NEXT_TX(entry); for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; u32 len; dma_addr_t mapping; u64 this_ctrl; len = skb_frag_size(this_frag); mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag, 0, len, DMA_TO_DEVICE); this_ctrl = ctrl; if (frag == skb_shinfo(skb)->nr_frags - 1) this_ctrl |= TXDCTRL_EOF; txd = &gp->init_block->txd[entry]; txd->buffer = cpu_to_le64(mapping); dma_wmb(); txd->control_word = cpu_to_le64(this_ctrl | len); if (gem_intme(entry)) intme |= TXDCTRL_INTME; entry = NEXT_TX(entry); } txd = &gp->init_block->txd[first_entry]; txd->buffer = cpu_to_le64(first_mapping); dma_wmb(); txd->control_word = cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len); } gp->tx_new = entry; if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) { netif_stop_queue(dev); /* netif_stop_queue() must be done before checking * tx index in TX_BUFFS_AVAIL() below, because * in gem_tx(), we update tx_old before checking for * netif_queue_stopped(). 
*/ smp_mb(); if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) netif_wake_queue(dev); } if (netif_msg_tx_queued(gp)) printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n", dev->name, entry, skb->len); mb(); writel(gp->tx_new, gp->regs + TXDMA_KICK); return NETDEV_TX_OK; } static void gem_pcs_reset(struct gem *gp) { int limit; u32 val; /* Reset PCS unit. */ val = readl(gp->regs + PCS_MIICTRL); val |= PCS_MIICTRL_RST; writel(val, gp->regs + PCS_MIICTRL); limit = 32; while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) { udelay(100); if (limit-- <= 0) break; } if (limit < 0) netdev_warn(gp->dev, "PCS reset bit would not clear\n"); } static void gem_pcs_reinit_adv(struct gem *gp) { u32 val; /* Make sure PCS is disabled while changing advertisement * configuration. */ val = readl(gp->regs + PCS_CFG); val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO); writel(val, gp->regs + PCS_CFG); /* Advertise all capabilities except asymmetric * pause. */ val = readl(gp->regs + PCS_MIIADV); val |= (PCS_MIIADV_FD | PCS_MIIADV_HD | PCS_MIIADV_SP | PCS_MIIADV_AP); writel(val, gp->regs + PCS_MIIADV); /* Enable and restart auto-negotiation, disable wrapback/loopback, * and re-enable PCS. */ val = readl(gp->regs + PCS_MIICTRL); val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE); val &= ~PCS_MIICTRL_WB; writel(val, gp->regs + PCS_MIICTRL); val = readl(gp->regs + PCS_CFG); val |= PCS_CFG_ENABLE; writel(val, gp->regs + PCS_CFG); /* Make sure serialink loopback is off. The meaning * of this bit is logically inverted based upon whether * you are in Serialink or SERDES mode. */ val = readl(gp->regs + PCS_SCTRL); if (gp->phy_type == phy_serialink) val &= ~PCS_SCTRL_LOOP; else val |= PCS_SCTRL_LOOP; writel(val, gp->regs + PCS_SCTRL); } #define STOP_TRIES 32 static void gem_reset(struct gem *gp) { int limit; u32 val; /* Make sure we won't get any more interrupts */ writel(0xffffffff, gp->regs + GREG_IMASK); /* Reset the chip */ writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST, gp->regs + GREG_SWRST); limit = STOP_TRIES; do { udelay(20); val = readl(gp->regs + GREG_SWRST); if (limit-- <= 0) break; } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)); if (limit < 0) netdev_err(gp->dev, "SW reset is ghetto\n"); if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) gem_pcs_reinit_adv(gp); } static void gem_start_dma(struct gem *gp) { u32 val; /* We are ready to rock, turn everything on. */ val = readl(gp->regs + TXDMA_CFG); writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); val = readl(gp->regs + RXDMA_CFG); writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); val = readl(gp->regs + MAC_TXCFG); writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); val = readl(gp->regs + MAC_RXCFG); writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); (void) readl(gp->regs + MAC_RXCFG); udelay(100); gem_enable_ints(gp); writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); } /* DMA won't be actually stopped before about 4ms tho ... */ static void gem_stop_dma(struct gem *gp) { u32 val; /* We are done rocking, turn everything off. */ val = readl(gp->regs + TXDMA_CFG); writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); val = readl(gp->regs + RXDMA_CFG); writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); val = readl(gp->regs + MAC_TXCFG); writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); val = readl(gp->regs + MAC_RXCFG); writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); (void) readl(gp->regs + MAC_RXCFG); /* Need to wait a bit ... 
done by the caller */ } // XXX dbl check what that function should do when called on PCS PHY static void gem_begin_auto_negotiation(struct gem *gp, const struct ethtool_link_ksettings *ep) { u32 advertise, features; int autoneg; int speed; int duplex; u32 advertising; if (ep) ethtool_convert_link_mode_to_legacy_u32( &advertising, ep->link_modes.advertising); if (gp->phy_type != phy_mii_mdio0 && gp->phy_type != phy_mii_mdio1) goto non_mii; /* Setup advertise */ if (found_mii_phy(gp)) features = gp->phy_mii.def->features; else features = 0; advertise = features & ADVERTISE_MASK; if (gp->phy_mii.advertising != 0) advertise &= gp->phy_mii.advertising; autoneg = gp->want_autoneg; speed = gp->phy_mii.speed; duplex = gp->phy_mii.duplex; /* Setup link parameters */ if (!ep) goto start_aneg; if (ep->base.autoneg == AUTONEG_ENABLE) { advertise = advertising; autoneg = 1; } else { autoneg = 0; speed = ep->base.speed; duplex = ep->base.duplex; } start_aneg: /* Sanitize settings based on PHY capabilities */ if ((features & SUPPORTED_Autoneg) == 0) autoneg = 0; if (speed == SPEED_1000 && !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full))) speed = SPEED_100; if (speed == SPEED_100 && !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full))) speed = SPEED_10; if (duplex == DUPLEX_FULL && !(features & (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full | SUPPORTED_10baseT_Full))) duplex = DUPLEX_HALF; if (speed == 0) speed = SPEED_10; /* If we are asleep, we don't try to actually setup the PHY, we * just store the settings */ if (!netif_device_present(gp->dev)) { gp->phy_mii.autoneg = gp->want_autoneg = autoneg; gp->phy_mii.speed = speed; gp->phy_mii.duplex = duplex; return; } /* Configure PHY & start aneg */ gp->want_autoneg = autoneg; if (autoneg) { if (found_mii_phy(gp)) gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise); gp->lstate = link_aneg; } else { if (found_mii_phy(gp)) gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex); gp->lstate = link_force_ok; } non_mii: gp->timer_ticks = 0; mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); } /* A link-up condition has occurred, initialize and enable the * rest of the chip. */ static int gem_set_link_modes(struct gem *gp) { struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0); int full_duplex, speed, pause; u32 val; full_duplex = 0; speed = SPEED_10; pause = 0; if (found_mii_phy(gp)) { if (gp->phy_mii.def->ops->read_link(&gp->phy_mii)) return 1; full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL); speed = gp->phy_mii.speed; pause = gp->phy_mii.pause; } else if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) { u32 pcs_lpa = readl(gp->regs + PCS_MIILP); if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes) full_duplex = 1; speed = SPEED_1000; } netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n", speed, (full_duplex ? "full" : "half")); /* We take the tx queue lock to avoid collisions between * this code, the tx path and the NAPI-driven error path */ __netif_tx_lock(txq, smp_processor_id()); val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU); if (full_duplex) { val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL); } else { /* MAC_TXCFG_NBO must be zero. 
*/ } writel(val, gp->regs + MAC_TXCFG); val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED); if (!full_duplex && (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1)) { val |= MAC_XIFCFG_DISE; } else if (full_duplex) { val |= MAC_XIFCFG_FLED; } if (speed == SPEED_1000) val |= (MAC_XIFCFG_GMII); writel(val, gp->regs + MAC_XIFCFG); /* If gigabit and half-duplex, enable carrier extension * mode. Else, disable it. */ if (speed == SPEED_1000 && !full_duplex) { val = readl(gp->regs + MAC_TXCFG); writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); val = readl(gp->regs + MAC_RXCFG); writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); } else { val = readl(gp->regs + MAC_TXCFG); writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); val = readl(gp->regs + MAC_RXCFG); writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); } if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) { u32 pcs_lpa = readl(gp->regs + PCS_MIILP); if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP)) pause = 1; } if (!full_duplex) writel(512, gp->regs + MAC_STIME); else writel(64, gp->regs + MAC_STIME); val = readl(gp->regs + MAC_MCCFG); if (pause) val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE); else val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE); writel(val, gp->regs + MAC_MCCFG); gem_start_dma(gp); __netif_tx_unlock(txq); if (netif_msg_link(gp)) { if (pause) { netdev_info(gp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n", gp->rx_fifo_sz, gp->rx_pause_off, gp->rx_pause_on); } else { netdev_info(gp->dev, "Pause is disabled\n"); } } return 0; } static int gem_mdio_link_not_up(struct gem *gp) { switch (gp->lstate) { case link_force_ret: netif_info(gp, link, gp->dev, "Autoneg failed again, keeping forced mode\n"); gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, gp->last_forced_speed, DUPLEX_HALF); gp->timer_ticks = 5; gp->lstate = link_force_ok; return 0; case link_aneg: /* We try forced modes after a failed aneg only on PHYs that don't * have "magic_aneg" bit set, which means they internally do the * while forced-mode thingy. On these, we just restart aneg */ if (gp->phy_mii.def->magic_aneg) return 1; netif_info(gp, link, gp->dev, "switching to forced 100bt\n"); /* Try forced modes. */ gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100, DUPLEX_HALF); gp->timer_ticks = 5; gp->lstate = link_force_try; return 0; case link_force_try: /* Downgrade from 100 to 10 Mbps if necessary. * If already at 10Mbps, warn user about the * situation every 10 ticks. */ if (gp->phy_mii.speed == SPEED_100) { gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10, DUPLEX_HALF); gp->timer_ticks = 5; netif_info(gp, link, gp->dev, "switching to forced 10bt\n"); return 0; } else return 1; default: return 0; } } static void gem_link_timer(struct timer_list *t) { struct gem *gp = from_timer(gp, t, link_timer); struct net_device *dev = gp->dev; int restart_aneg = 0; /* There's no point doing anything if we're going to be reset */ if (gp->reset_task_pending) return; if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) { u32 val = readl(gp->regs + PCS_MIISTAT); if (!(val & PCS_MIISTAT_LS)) val = readl(gp->regs + PCS_MIISTAT); if ((val & PCS_MIISTAT_LS) != 0) { if (gp->lstate == link_up) goto restart; gp->lstate = link_up; netif_carrier_on(dev); (void)gem_set_link_modes(gp); } goto restart; } if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) { /* Ok, here we got a link. If we had it due to a forced * fallback, and we were configured for autoneg, we do * retry a short autoneg pass. 
If you know your hub is * broken, use ethtool ;) */ if (gp->lstate == link_force_try && gp->want_autoneg) { gp->lstate = link_force_ret; gp->last_forced_speed = gp->phy_mii.speed; gp->timer_ticks = 5; if (netif_msg_link(gp)) netdev_info(dev, "Got link after fallback, retrying autoneg once...\n"); gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising); } else if (gp->lstate != link_up) { gp->lstate = link_up; netif_carrier_on(dev); if (gem_set_link_modes(gp)) restart_aneg = 1; } } else { /* If the link was previously up, we restart the * whole process */ if (gp->lstate == link_up) { gp->lstate = link_down; netif_info(gp, link, dev, "Link down\n"); netif_carrier_off(dev); gem_schedule_reset(gp); /* The reset task will restart the timer */ return; } else if (++gp->timer_ticks > 10) { if (found_mii_phy(gp)) restart_aneg = gem_mdio_link_not_up(gp); else restart_aneg = 1; } } if (restart_aneg) { gem_begin_auto_negotiation(gp, NULL); return; } restart: mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); } static void gem_clean_rings(struct gem *gp) { struct gem_init_block *gb = gp->init_block; struct sk_buff *skb; int i; dma_addr_t dma_addr; for (i = 0; i < RX_RING_SIZE; i++) { struct gem_rxd *rxd; rxd = &gb->rxd[i]; if (gp->rx_skbs[i] != NULL) { skb = gp->rx_skbs[i]; dma_addr = le64_to_cpu(rxd->buffer); dma_unmap_page(&gp->pdev->dev, dma_addr, RX_BUF_ALLOC_SIZE(gp), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); gp->rx_skbs[i] = NULL; } rxd->status_word = 0; dma_wmb(); rxd->buffer = 0; } for (i = 0; i < TX_RING_SIZE; i++) { if (gp->tx_skbs[i] != NULL) { struct gem_txd *txd; int frag; skb = gp->tx_skbs[i]; gp->tx_skbs[i] = NULL; for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { int ent = i & (TX_RING_SIZE - 1); txd = &gb->txd[ent]; dma_addr = le64_to_cpu(txd->buffer); dma_unmap_page(&gp->pdev->dev, dma_addr, le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ, DMA_TO_DEVICE); if (frag != skb_shinfo(skb)->nr_frags) i++; } dev_kfree_skb_any(skb); } } } static void gem_init_rings(struct gem *gp) { struct gem_init_block *gb = gp->init_block; struct net_device *dev = gp->dev; int i; dma_addr_t dma_addr; gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0; gem_clean_rings(gp); gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN, (unsigned)VLAN_ETH_FRAME_LEN); for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb; struct gem_rxd *rxd = &gb->rxd[i]; skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL); if (!skb) { rxd->buffer = 0; rxd->status_word = 0; continue; } gp->rx_skbs[i] = skb; skb_put(skb, (gp->rx_buf_sz + RX_OFFSET)); dma_addr = dma_map_page(&gp->pdev->dev, virt_to_page(skb->data), offset_in_page(skb->data), RX_BUF_ALLOC_SIZE(gp), DMA_FROM_DEVICE); rxd->buffer = cpu_to_le64(dma_addr); dma_wmb(); rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); skb_reserve(skb, RX_OFFSET); } for (i = 0; i < TX_RING_SIZE; i++) { struct gem_txd *txd = &gb->txd[i]; txd->control_word = 0; dma_wmb(); txd->buffer = 0; } wmb(); } /* Init PHY interface and start link poll state machine */ static void gem_init_phy(struct gem *gp) { u32 mifcfg; /* Revert MIF CFG setting done on stop_phy */ mifcfg = readl(gp->regs + MIF_CFG); mifcfg &= ~MIF_CFG_BBMODE; writel(mifcfg, gp->regs + MIF_CFG); if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) { int i; /* Those delays sucks, the HW seems to love them though, I'll * seriously consider breaking some locks here to be able * to schedule instead */ for (i = 0; i < 3; i++) { #ifdef CONFIG_PPC_PMAC pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 
0); msleep(20); #endif /* Some PHYs used by apple have problem getting back to us, * we do an additional reset here */ sungem_phy_write(gp, MII_BMCR, BMCR_RESET); msleep(20); if (sungem_phy_read(gp, MII_BMCR) != 0xffff) break; if (i == 2) netdev_warn(gp->dev, "GMAC PHY not responding !\n"); } } if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { u32 val; /* Init datapath mode register. */ if (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) { val = PCS_DMODE_MGM; } else if (gp->phy_type == phy_serialink) { val = PCS_DMODE_SM | PCS_DMODE_GMOE; } else { val = PCS_DMODE_ESM; } writel(val, gp->regs + PCS_DMODE); } if (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) { /* Reset and detect MII PHY */ sungem_phy_probe(&gp->phy_mii, gp->mii_phy_addr); /* Init PHY */ if (gp->phy_mii.def && gp->phy_mii.def->ops->init) gp->phy_mii.def->ops->init(&gp->phy_mii); } else { gem_pcs_reset(gp); gem_pcs_reinit_adv(gp); } /* Default aneg parameters */ gp->timer_ticks = 0; gp->lstate = link_down; netif_carrier_off(gp->dev); /* Print things out */ if (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) netdev_info(gp->dev, "Found %s PHY\n", gp->phy_mii.def ? gp->phy_mii.def->name : "no"); gem_begin_auto_negotiation(gp, NULL); } static void gem_init_dma(struct gem *gp) { u64 desc_dma = (u64) gp->gblock_dvma; u32 val; val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE); writel(val, gp->regs + TXDMA_CFG); writel(desc_dma >> 32, gp->regs + TXDMA_DBHI); writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW); desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); writel(0, gp->regs + TXDMA_KICK); val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128); writel(val, gp->regs + RXDMA_CFG); writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); writel(val, gp->regs + RXDMA_PTHRESH); if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) writel(((5 & RXDMA_BLANK_IPKTS) | ((8 << 12) & RXDMA_BLANK_ITIME)), gp->regs + RXDMA_BLANK); else writel(((5 & RXDMA_BLANK_IPKTS) | ((4 << 12) & RXDMA_BLANK_ITIME)), gp->regs + RXDMA_BLANK); } static u32 gem_setup_multicast(struct gem *gp) { u32 rxcfg = 0; int i; if ((gp->dev->flags & IFF_ALLMULTI) || (netdev_mc_count(gp->dev) > 256)) { for (i=0; i<16; i++) writel(0xffff, gp->regs + MAC_HASH0 + (i << 2)); rxcfg |= MAC_RXCFG_HFE; } else if (gp->dev->flags & IFF_PROMISC) { rxcfg |= MAC_RXCFG_PROM; } else { u16 hash_table[16]; u32 crc; struct netdev_hw_addr *ha; int i; memset(hash_table, 0, sizeof(hash_table)); netdev_for_each_mc_addr(ha, gp->dev) { crc = ether_crc_le(6, ha->addr); crc >>= 24; hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); } for (i=0; i<16; i++) writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2)); rxcfg |= MAC_RXCFG_HFE; } return rxcfg; } static void gem_init_mac(struct gem *gp) { const unsigned char *e = &gp->dev->dev_addr[0]; writel(0x1bf0, gp->regs + MAC_SNDPAUSE); writel(0x00, gp->regs + MAC_IPG0); writel(0x08, gp->regs + MAC_IPG1); writel(0x04, gp->regs + MAC_IPG2); writel(0x40, gp->regs + MAC_STIME); writel(0x40, gp->regs + MAC_MINFSZ); /* Ethernet payload + header + FCS + optional VLAN tag. 
*/ writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ); writel(0x07, gp->regs + MAC_PASIZE); writel(0x04, gp->regs + MAC_JAMSIZE); writel(0x10, gp->regs + MAC_ATTLIM); writel(0x8808, gp->regs + MAC_MCTYPE); writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED); writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); writel(0, gp->regs + MAC_ADDR3); writel(0, gp->regs + MAC_ADDR4); writel(0, gp->regs + MAC_ADDR5); writel(0x0001, gp->regs + MAC_ADDR6); writel(0xc200, gp->regs + MAC_ADDR7); writel(0x0180, gp->regs + MAC_ADDR8); writel(0, gp->regs + MAC_AFILT0); writel(0, gp->regs + MAC_AFILT1); writel(0, gp->regs + MAC_AFILT2); writel(0, gp->regs + MAC_AF21MSK); writel(0, gp->regs + MAC_AF0MSK); gp->mac_rx_cfg = gem_setup_multicast(gp); #ifdef STRIP_FCS gp->mac_rx_cfg |= MAC_RXCFG_SFCS; #endif writel(0, gp->regs + MAC_NCOLL); writel(0, gp->regs + MAC_FASUCC); writel(0, gp->regs + MAC_ECOLL); writel(0, gp->regs + MAC_LCOLL); writel(0, gp->regs + MAC_DTIMER); writel(0, gp->regs + MAC_PATMPS); writel(0, gp->regs + MAC_RFCTR); writel(0, gp->regs + MAC_LERR); writel(0, gp->regs + MAC_AERR); writel(0, gp->regs + MAC_FCSERR); writel(0, gp->regs + MAC_RXCVERR); /* Clear RX/TX/MAC/XIF config, we will set these up and enable * them once a link is established. */ writel(0, gp->regs + MAC_TXCFG); writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG); writel(0, gp->regs + MAC_MCCFG); writel(0, gp->regs + MAC_XIFCFG); /* Setup MAC interrupts. We want to get all of the interesting * counter expiration events, but we do not want to hear about * normal rx/tx as the DMA engine tells us that. */ writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK); writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); /* Don't enable even the PAUSE interrupts for now, we * make no use of those events other than to record them. */ writel(0xffffffff, gp->regs + MAC_MCMASK); /* Don't enable GEM's WOL in normal operations */ if (gp->has_wol) writel(0, gp->regs + WOL_WAKECSR); } static void gem_init_pause_thresholds(struct gem *gp) { u32 cfg; /* Calculate pause thresholds. Setting the OFF threshold to the * full RX fifo size effectively disables PAUSE generation which * is what we do for 10/100 only GEMs which have FIFOs too small * to make real gains from PAUSE. */ if (gp->rx_fifo_sz <= (2 * 1024)) { gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz; } else { int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63; int off = (gp->rx_fifo_sz - (max_frame * 2)); int on = off - max_frame; gp->rx_pause_off = off; gp->rx_pause_on = on; } /* Configure the chip "burst" DMA mode & enable some * HW bug fixes on Apple version */ cfg = 0; if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX; #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) cfg |= GREG_CFG_IBURST; #endif cfg |= ((31 << 1) & GREG_CFG_TXDMALIM); cfg |= ((31 << 6) & GREG_CFG_RXDMALIM); writel(cfg, gp->regs + GREG_CFG); /* If Infinite Burst didn't stick, then use different * thresholds (and Apple bug fixes don't exist) */ if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) { cfg = ((2 << 1) & GREG_CFG_TXDMALIM); cfg |= ((8 << 6) & GREG_CFG_RXDMALIM); writel(cfg, gp->regs + GREG_CFG); } } static int gem_check_invariants(struct gem *gp) { struct pci_dev *pdev = gp->pdev; u32 mif_cfg; /* On Apple's sungem, we can't rely on registers as the chip * was been powered down by the firmware. The PHY is looked * up later on. 
*/ if (pdev->vendor == PCI_VENDOR_ID_APPLE) { gp->phy_type = phy_mii_mdio0; gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; gp->swrst_base = 0; mif_cfg = readl(gp->regs + MIF_CFG); mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1); mif_cfg |= MIF_CFG_MDI0; writel(mif_cfg, gp->regs + MIF_CFG); writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE); writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG); /* We hard-code the PHY address so we can properly bring it out of * reset later on, we can't really probe it at this point, though * that isn't an issue. */ if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC) gp->mii_phy_addr = 1; else gp->mii_phy_addr = 0; return 0; } mif_cfg = readl(gp->regs + MIF_CFG); if (pdev->vendor == PCI_VENDOR_ID_SUN && pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) { /* One of the MII PHYs _must_ be present * as this chip has no gigabit PHY. */ if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) { pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n", mif_cfg); return -1; } } /* Determine initial PHY interface type guess. MDIO1 is the * external PHY and thus takes precedence over MDIO0. */ if (mif_cfg & MIF_CFG_MDI1) { gp->phy_type = phy_mii_mdio1; mif_cfg |= MIF_CFG_PSELECT; writel(mif_cfg, gp->regs + MIF_CFG); } else if (mif_cfg & MIF_CFG_MDI0) { gp->phy_type = phy_mii_mdio0; mif_cfg &= ~MIF_CFG_PSELECT; writel(mif_cfg, gp->regs + MIF_CFG); } else { #ifdef CONFIG_SPARC const char *p; p = of_get_property(gp->of_node, "shared-pins", NULL); if (p && !strcmp(p, "serdes")) gp->phy_type = phy_serdes; else #endif gp->phy_type = phy_serialink; } if (gp->phy_type == phy_mii_mdio1 || gp->phy_type == phy_mii_mdio0) { int i; for (i = 0; i < 32; i++) { gp->mii_phy_addr = i; if (sungem_phy_read(gp, MII_BMCR) != 0xffff) break; } if (i == 32) { if (pdev->device != PCI_DEVICE_ID_SUN_GEM) { pr_err("RIO MII phy will not respond\n"); return -1; } gp->phy_type = phy_serdes; } } /* Fetch the FIFO configurations now too. */ gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; if (pdev->vendor == PCI_VENDOR_ID_SUN) { if (pdev->device == PCI_DEVICE_ID_SUN_GEM) { if (gp->tx_fifo_sz != (9 * 1024) || gp->rx_fifo_sz != (20 * 1024)) { pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n", gp->tx_fifo_sz, gp->rx_fifo_sz); return -1; } gp->swrst_base = 0; } else { if (gp->tx_fifo_sz != (2 * 1024) || gp->rx_fifo_sz != (2 * 1024)) { pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n", gp->tx_fifo_sz, gp->rx_fifo_sz); return -1; } gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT; } } return 0; } static void gem_reinit_chip(struct gem *gp) { /* Reset the chip */ gem_reset(gp); /* Make sure ints are disabled */ gem_disable_ints(gp); /* Allocate & setup ring buffers */ gem_init_rings(gp); /* Configure pause thresholds */ gem_init_pause_thresholds(gp); /* Init DMA & MAC engines */ gem_init_dma(gp); gem_init_mac(gp); } static void gem_stop_phy(struct gem *gp, int wol) { u32 mifcfg; /* Let the chip settle down a bit, it seems that helps * for sleep mode on some models */ msleep(10); /* Make sure we aren't polling PHY status change. 
We * don't currently use that feature though */ mifcfg = readl(gp->regs + MIF_CFG); mifcfg &= ~MIF_CFG_POLL; writel(mifcfg, gp->regs + MIF_CFG); if (wol && gp->has_wol) { const unsigned char *e = &gp->dev->dev_addr[0]; u32 csr; /* Setup wake-on-lan for MAGIC packet */ writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0); writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1); writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2); writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT); csr = WOL_WAKECSR_ENABLE; if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0) csr |= WOL_WAKECSR_MII; writel(csr, gp->regs + WOL_WAKECSR); } else { writel(0, gp->regs + MAC_RXCFG); (void)readl(gp->regs + MAC_RXCFG); /* Machine sleep will die in strange ways if we * dont wait a bit here, looks like the chip takes * some time to really shut down */ msleep(10); } writel(0, gp->regs + MAC_TXCFG); writel(0, gp->regs + MAC_XIFCFG); writel(0, gp->regs + TXDMA_CFG); writel(0, gp->regs + RXDMA_CFG); if (!wol) { gem_reset(gp); writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST); writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend) gp->phy_mii.def->ops->suspend(&gp->phy_mii); /* According to Apple, we must set the MDIO pins to this begnign * state or we may 1) eat more current, 2) damage some PHYs */ writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG); writel(0, gp->regs + MIF_BBCLK); writel(0, gp->regs + MIF_BBDATA); writel(0, gp->regs + MIF_BBOENAB); writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG); (void) readl(gp->regs + MAC_XIFCFG); } } static int gem_do_start(struct net_device *dev) { struct gem *gp = netdev_priv(dev); int rc; pci_set_master(gp->pdev); /* Init & setup chip hardware */ gem_reinit_chip(gp); /* An interrupt might come in handy */ rc = request_irq(gp->pdev->irq, gem_interrupt, IRQF_SHARED, dev->name, (void *)dev); if (rc) { netdev_err(dev, "failed to request irq !\n"); gem_reset(gp); gem_clean_rings(gp); gem_put_cell(gp); return rc; } /* Mark us as attached again if we come from resume(), this has * no effect if we weren't detached and needs to be done now. */ netif_device_attach(dev); /* Restart NAPI & queues */ gem_netif_start(gp); /* Detect & init PHY, start autoneg etc... this will * eventually result in starting DMA operations when * the link is up */ gem_init_phy(gp); return 0; } static void gem_do_stop(struct net_device *dev, int wol) { struct gem *gp = netdev_priv(dev); /* Stop NAPI and stop tx queue */ gem_netif_stop(gp); /* Make sure ints are disabled. We don't care about * synchronizing as NAPI is disabled, thus a stray * interrupt will do nothing bad (our irq handler * just schedules NAPI) */ gem_disable_ints(gp); /* Stop the link timer */ del_timer_sync(&gp->link_timer); /* We cannot cancel the reset task while holding the * rtnl lock, we'd get an A->B / B->A deadlock stituation * if we did. This is not an issue however as the reset * task is synchronized vs. us (rtnl_lock) and will do * nothing if the device is down or suspended. We do * still clear reset_task_pending to avoid a spurrious * reset later on in case we do resume before it gets * scheduled. 
*/ gp->reset_task_pending = 0; /* If we are going to sleep with WOL */ gem_stop_dma(gp); msleep(10); if (!wol) gem_reset(gp); msleep(10); /* Get rid of rings */ gem_clean_rings(gp); /* No irq needed anymore */ free_irq(gp->pdev->irq, (void *) dev); /* Shut the PHY down eventually and setup WOL */ gem_stop_phy(gp, wol); } static void gem_reset_task(struct work_struct *work) { struct gem *gp = container_of(work, struct gem, reset_task); /* Lock out the network stack (essentially shield ourselves * against a racing open, close, control call, or suspend */ rtnl_lock(); /* Skip the reset task if suspended or closed, or if it's * been cancelled by gem_do_stop (see comment there) */ if (!netif_device_present(gp->dev) || !netif_running(gp->dev) || !gp->reset_task_pending) { rtnl_unlock(); return; } /* Stop the link timer */ del_timer_sync(&gp->link_timer); /* Stop NAPI and tx */ gem_netif_stop(gp); /* Reset the chip & rings */ gem_reinit_chip(gp); if (gp->lstate == link_up) gem_set_link_modes(gp); /* Restart NAPI and Tx */ gem_netif_start(gp); /* We are back ! */ gp->reset_task_pending = 0; /* If the link is not up, restart autoneg, else restart the * polling timer */ if (gp->lstate != link_up) gem_begin_auto_negotiation(gp, NULL); else mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); rtnl_unlock(); } static int gem_open(struct net_device *dev) { struct gem *gp = netdev_priv(dev); int rc; /* We allow open while suspended, we just do nothing, * the chip will be initialized in resume() */ if (netif_device_present(dev)) { /* Enable the cell */ gem_get_cell(gp); /* Make sure PCI access and bus master are enabled */ rc = pci_enable_device(gp->pdev); if (rc) { netdev_err(dev, "Failed to enable chip on PCI bus !\n"); /* Put cell and forget it for now, it will be considered *as still asleep, a new sleep cycle may bring it back */ gem_put_cell(gp); return -ENXIO; } return gem_do_start(dev); } return 0; } static int gem_close(struct net_device *dev) { struct gem *gp = netdev_priv(dev); if (netif_device_present(dev)) { gem_do_stop(dev, 0); /* Make sure bus master is disabled */ pci_disable_device(gp->pdev); /* Cell not needed neither if no WOL */ if (!gp->asleep_wol) gem_put_cell(gp); } return 0; } static int __maybe_unused gem_suspend(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); struct gem *gp = netdev_priv(dev); /* Lock the network stack first to avoid racing with open/close, * reset task and setting calls */ rtnl_lock(); /* Not running, mark ourselves non-present, no need for * a lock here */ if (!netif_running(dev)) { netif_device_detach(dev); rtnl_unlock(); return 0; } netdev_info(dev, "suspending, WakeOnLan %s\n", (gp->wake_on_lan && netif_running(dev)) ? "enabled" : "disabled"); /* Tell the network stack we're gone. gem_do_stop() below will * synchronize with TX, stop NAPI etc... */ netif_device_detach(dev); /* Switch off chip, remember WOL setting */ gp->asleep_wol = !!gp->wake_on_lan; gem_do_stop(dev, gp->asleep_wol); /* Cell not needed neither if no WOL */ if (!gp->asleep_wol) gem_put_cell(gp); /* Unlock the network stack */ rtnl_unlock(); return 0; } static int __maybe_unused gem_resume(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); struct gem *gp = netdev_priv(dev); /* See locking comment in gem_suspend */ rtnl_lock(); /* Not running, mark ourselves present, no need for * a lock here */ if (!netif_running(dev)) { netif_device_attach(dev); rtnl_unlock(); return 0; } /* Enable the cell */ gem_get_cell(gp); /* Restart chip. 
If that fails there isn't much we can do, we * leave things stopped. */ gem_do_start(dev); /* If we had WOL enabled, the cell clock was never turned off during * sleep, so we end up beeing unbalanced. Fix that here */ if (gp->asleep_wol) gem_put_cell(gp); /* Unlock the network stack */ rtnl_unlock(); return 0; } static struct net_device_stats *gem_get_stats(struct net_device *dev) { struct gem *gp = netdev_priv(dev); /* I have seen this being called while the PM was in progress, * so we shield against this. Let's also not poke at registers * while the reset task is going on. * * TODO: Move stats collection elsewhere (link timer ?) and * make this a nop to avoid all those synchro issues */ if (!netif_device_present(dev) || !netif_running(dev)) goto bail; /* Better safe than sorry... */ if (WARN_ON(!gp->cell_enabled)) goto bail; dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR); writel(0, gp->regs + MAC_FCSERR); dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR); writel(0, gp->regs + MAC_AERR); dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR); writel(0, gp->regs + MAC_LERR); dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL); dev->stats.collisions += (readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL)); writel(0, gp->regs + MAC_ECOLL); writel(0, gp->regs + MAC_LCOLL); bail: return &dev->stats; } static int gem_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *macaddr = (struct sockaddr *) addr; const unsigned char *e = &dev->dev_addr[0]; struct gem *gp = netdev_priv(dev); if (!is_valid_ether_addr(macaddr->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(dev, macaddr->sa_data); /* We'll just catch it later when the device is up'd or resumed */ if (!netif_running(dev) || !netif_device_present(dev)) return 0; /* Better safe than sorry... */ if (WARN_ON(!gp->cell_enabled)) return 0; writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); return 0; } static void gem_set_multicast(struct net_device *dev) { struct gem *gp = netdev_priv(dev); u32 rxcfg, rxcfg_new; int limit = 10000; if (!netif_running(dev) || !netif_device_present(dev)) return; /* Better safe than sorry... */ if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled)) return; rxcfg = readl(gp->regs + MAC_RXCFG); rxcfg_new = gem_setup_multicast(gp); #ifdef STRIP_FCS rxcfg_new |= MAC_RXCFG_SFCS; #endif gp->mac_rx_cfg = rxcfg_new; writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) { if (!limit--) break; udelay(10); } rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE); rxcfg |= rxcfg_new; writel(rxcfg, gp->regs + MAC_RXCFG); } /* Jumbo-grams don't seem to work :-( */ #define GEM_MIN_MTU ETH_MIN_MTU #if 1 #define GEM_MAX_MTU ETH_DATA_LEN #else #define GEM_MAX_MTU 9000 #endif static int gem_change_mtu(struct net_device *dev, int new_mtu) { struct gem *gp = netdev_priv(dev); dev->mtu = new_mtu; /* We'll just catch it later when the device is up'd or resumed */ if (!netif_running(dev) || !netif_device_present(dev)) return 0; /* Better safe than sorry... 
*/ if (WARN_ON(!gp->cell_enabled)) return 0; gem_netif_stop(gp); gem_reinit_chip(gp); if (gp->lstate == link_up) gem_set_link_modes(gp); gem_netif_start(gp); return 0; } static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct gem *gp = netdev_priv(dev); strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); strscpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info)); } static int gem_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct gem *gp = netdev_priv(dev); u32 supported, advertising; if (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) { if (gp->phy_mii.def) supported = gp->phy_mii.def->features; else supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full); /* XXX hardcoded stuff for now */ cmd->base.port = PORT_MII; cmd->base.phy_address = 0; /* XXX fixed PHYAD */ /* Return current PHY settings */ cmd->base.autoneg = gp->want_autoneg; cmd->base.speed = gp->phy_mii.speed; cmd->base.duplex = gp->phy_mii.duplex; advertising = gp->phy_mii.advertising; /* If we started with a forced mode, we don't have a default * advertise set, we need to return something sensible so * userland can re-enable autoneg properly. */ if (advertising == 0) advertising = supported; } else { // XXX PCS ? supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg); advertising = supported; cmd->base.speed = 0; cmd->base.duplex = 0; cmd->base.port = 0; cmd->base.phy_address = 0; cmd->base.autoneg = 0; /* serdes means usually a Fibre connector, with most fixed */ if (gp->phy_type == phy_serdes) { cmd->base.port = PORT_FIBRE; supported = (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE | SUPPORTED_Autoneg | SUPPORTED_Pause | SUPPORTED_Asym_Pause); advertising = supported; if (gp->lstate == link_up) cmd->base.speed = SPEED_1000; cmd->base.duplex = DUPLEX_FULL; cmd->base.autoneg = 1; } } ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); return 0; } static int gem_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct gem *gp = netdev_priv(dev); u32 speed = cmd->base.speed; u32 advertising; ethtool_convert_link_mode_to_legacy_u32(&advertising, cmd->link_modes.advertising); /* Verify the settings we care about. */ if (cmd->base.autoneg != AUTONEG_ENABLE && cmd->base.autoneg != AUTONEG_DISABLE) return -EINVAL; if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0) return -EINVAL; if (cmd->base.autoneg == AUTONEG_DISABLE && ((speed != SPEED_1000 && speed != SPEED_100 && speed != SPEED_10) || (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL))) return -EINVAL; /* Apply settings and restart link process. 
*/ if (netif_device_present(gp->dev)) { del_timer_sync(&gp->link_timer); gem_begin_auto_negotiation(gp, cmd); } return 0; } static int gem_nway_reset(struct net_device *dev) { struct gem *gp = netdev_priv(dev); if (!gp->want_autoneg) return -EINVAL; /* Restart link process */ if (netif_device_present(gp->dev)) { del_timer_sync(&gp->link_timer); gem_begin_auto_negotiation(gp, NULL); } return 0; } static u32 gem_get_msglevel(struct net_device *dev) { struct gem *gp = netdev_priv(dev); return gp->msg_enable; } static void gem_set_msglevel(struct net_device *dev, u32 value) { struct gem *gp = netdev_priv(dev); gp->msg_enable = value; } /* Add more when I understand how to program the chip */ /* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */ #define WOL_SUPPORTED_MASK (WAKE_MAGIC) static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct gem *gp = netdev_priv(dev); /* Add more when I understand how to program the chip */ if (gp->has_wol) { wol->supported = WOL_SUPPORTED_MASK; wol->wolopts = gp->wake_on_lan; } else { wol->supported = 0; wol->wolopts = 0; } } static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct gem *gp = netdev_priv(dev); if (!gp->has_wol) return -EOPNOTSUPP; gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK; return 0; } static const struct ethtool_ops gem_ethtool_ops = { .get_drvinfo = gem_get_drvinfo, .get_link = ethtool_op_get_link, .nway_reset = gem_nway_reset, .get_msglevel = gem_get_msglevel, .set_msglevel = gem_set_msglevel, .get_wol = gem_get_wol, .set_wol = gem_set_wol, .get_link_ksettings = gem_get_link_ksettings, .set_link_ksettings = gem_set_link_ksettings, }; static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct gem *gp = netdev_priv(dev); struct mii_ioctl_data *data = if_mii(ifr); int rc = -EOPNOTSUPP; /* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that * netif_device_present() is true and holds rtnl_lock for us * so we have nothing to worry about */ switch (cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = gp->mii_phy_addr; fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f, data->reg_num & 0x1f); rc = 0; break; case SIOCSMIIREG: /* Write MII PHY register. */ __sungem_phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); rc = 0; break; } return rc; } #if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC)) /* Fetch MAC address from vital product data of PCI ROM. */ static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) { int this_offset; for (this_offset = 0x20; this_offset < len; this_offset++) { void __iomem *p = rom_base + this_offset; int i; if (readb(p + 0) != 0x90 || readb(p + 1) != 0x00 || readb(p + 2) != 0x09 || readb(p + 3) != 0x4e || readb(p + 4) != 0x41 || readb(p + 5) != 0x06) continue; this_offset += 6; p += 6; for (i = 0; i < 6; i++) dev_addr[i] = readb(p + i); return 1; } return 0; } static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) { size_t size; void __iomem *p = pci_map_rom(pdev, &size); if (p) { int found; found = readb(p) == 0x55 && readb(p + 1) == 0xaa && find_eth_addr_in_vpd(p, (64 * 1024), dev_addr); pci_unmap_rom(pdev, p); if (found) return; } /* Sun MAC prefix then 3 random bytes. 
*/ dev_addr[0] = 0x08; dev_addr[1] = 0x00; dev_addr[2] = 0x20; get_random_bytes(dev_addr + 3, 3); } #endif /* not Sparc and not PPC */ static int gem_get_device_address(struct gem *gp) { #if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC) struct net_device *dev = gp->dev; const unsigned char *addr; addr = of_get_property(gp->of_node, "local-mac-address", NULL); if (addr == NULL) { #ifdef CONFIG_SPARC addr = idprom->id_ethaddr; #else printk("\n"); pr_err("%s: can't get mac-address\n", dev->name); return -1; #endif } eth_hw_addr_set(dev, addr); #else u8 addr[ETH_ALEN]; get_gem_mac_nonobp(gp->pdev, addr); eth_hw_addr_set(gp->dev, addr); #endif return 0; } static void gem_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); if (dev) { struct gem *gp = netdev_priv(dev); unregister_netdev(dev); /* Ensure reset task is truly gone */ cancel_work_sync(&gp->reset_task); /* Free resources */ dma_free_coherent(&pdev->dev, sizeof(struct gem_init_block), gp->init_block, gp->gblock_dvma); iounmap(gp->regs); pci_release_regions(pdev); free_netdev(dev); } } static const struct net_device_ops gem_netdev_ops = { .ndo_open = gem_open, .ndo_stop = gem_close, .ndo_start_xmit = gem_start_xmit, .ndo_get_stats = gem_get_stats, .ndo_set_rx_mode = gem_set_multicast, .ndo_eth_ioctl = gem_ioctl, .ndo_tx_timeout = gem_tx_timeout, .ndo_change_mtu = gem_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = gem_set_mac_address, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = gem_poll_controller, #endif }; static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned long gemreg_base, gemreg_len; struct net_device *dev; struct gem *gp; int err, pci_using_dac; printk_once(KERN_INFO "%s", version); /* Apple gmac note: during probe, the chip is powered up by * the arch code to allow the code below to work (and to let * the chip be probed on the config space. It won't stay powered * up until the interface is brought up however, so we can't rely * on register configuration done at this point. */ err = pci_enable_device(pdev); if (err) { pr_err("Cannot enable MMIO operation, aborting\n"); return err; } pci_set_master(pdev); /* Configure DMA attributes. */ /* All of the GEM documentation states that 64-bit DMA addressing * is fully supported and should work just fine. However the * front end for RIO based GEMs is different and only supports * 32-bit addressing. * * For now we assume the various PPC GEMs are 32-bit only as well. 
*/ if (pdev->vendor == PCI_VENDOR_ID_SUN && pdev->device == PCI_DEVICE_ID_SUN_GEM && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { pr_err("No usable DMA configuration, aborting\n"); goto err_disable_device; } pci_using_dac = 0; } gemreg_base = pci_resource_start(pdev, 0); gemreg_len = pci_resource_len(pdev, 0); if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { pr_err("Cannot find proper PCI device base address, aborting\n"); err = -ENODEV; goto err_disable_device; } dev = alloc_etherdev(sizeof(*gp)); if (!dev) { err = -ENOMEM; goto err_disable_device; } SET_NETDEV_DEV(dev, &pdev->dev); gp = netdev_priv(dev); err = pci_request_regions(pdev, DRV_NAME); if (err) { pr_err("Cannot obtain PCI resources, aborting\n"); goto err_out_free_netdev; } gp->pdev = pdev; gp->dev = dev; gp->msg_enable = DEFAULT_MSG; timer_setup(&gp->link_timer, gem_link_timer, 0); INIT_WORK(&gp->reset_task, gem_reset_task); gp->lstate = link_down; gp->timer_ticks = 0; netif_carrier_off(dev); gp->regs = ioremap(gemreg_base, gemreg_len); if (!gp->regs) { pr_err("Cannot map device registers, aborting\n"); err = -EIO; goto err_out_free_res; } /* On Apple, we want a reference to the Open Firmware device-tree * node. We use it for clock control. */ #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) gp->of_node = pci_device_to_OF_node(pdev); #endif /* Only Apple version supports WOL afaik */ if (pdev->vendor == PCI_VENDOR_ID_APPLE) gp->has_wol = 1; /* Make sure cell is enabled */ gem_get_cell(gp); /* Make sure everything is stopped and in init state */ gem_reset(gp); /* Fill up the mii_phy structure (even if we won't use it) */ gp->phy_mii.dev = dev; gp->phy_mii.mdio_read = _sungem_phy_read; gp->phy_mii.mdio_write = _sungem_phy_write; #ifdef CONFIG_PPC_PMAC gp->phy_mii.platform_data = gp->of_node; #endif /* By default, we start with autoneg */ gp->want_autoneg = 1; /* Check fifo sizes, PHY type, etc... */ if (gem_check_invariants(gp)) { err = -ENODEV; goto err_out_iounmap; } /* It is guaranteed that the returned buffer will be at least * PAGE_SIZE aligned. 
*/ gp->init_block = dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block), &gp->gblock_dvma, GFP_KERNEL); if (!gp->init_block) { pr_err("Cannot allocate init block, aborting\n"); err = -ENOMEM; goto err_out_iounmap; } err = gem_get_device_address(gp); if (err) goto err_out_free_consistent; dev->netdev_ops = &gem_netdev_ops; netif_napi_add(dev, &gp->napi, gem_poll); dev->ethtool_ops = &gem_ethtool_ops; dev->watchdog_timeo = 5 * HZ; dev->dma = 0; /* Set that now, in case PM kicks in now */ pci_set_drvdata(pdev, dev); /* We can do scatter/gather and HW checksum */ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; dev->features = dev->hw_features; if (pci_using_dac) dev->features |= NETIF_F_HIGHDMA; /* MTU range: 68 - 1500 (Jumbo mode is broken) */ dev->min_mtu = GEM_MIN_MTU; dev->max_mtu = GEM_MAX_MTU; /* Register with kernel */ if (register_netdev(dev)) { pr_err("Cannot register net device, aborting\n"); err = -ENOMEM; goto err_out_free_consistent; } /* Undo the get_cell with appropriate locking (we could use * ndo_init/uninit but that would be even more clumsy imho) */ rtnl_lock(); gem_put_cell(gp); rtnl_unlock(); netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n", dev->dev_addr); return 0; err_out_free_consistent: gem_remove_one(pdev); err_out_iounmap: gem_put_cell(gp); iounmap(gp->regs); err_out_free_res: pci_release_regions(pdev); err_out_free_netdev: free_netdev(dev); err_disable_device: pci_disable_device(pdev); return err; } static SIMPLE_DEV_PM_OPS(gem_pm_ops, gem_suspend, gem_resume); static struct pci_driver gem_driver = { .name = GEM_MODULE_NAME, .id_table = gem_pci_tbl, .probe = gem_init_one, .remove = gem_remove_one, .driver.pm = &gem_pm_ops, }; module_pci_driver(gem_driver);
linux-master
drivers/net/ethernet/sun/sungem.c
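The pause-threshold arithmetic in gem_init_pause_thresholds() above is easy to check in isolation: small FIFOs get PAUSE effectively disabled, larger ones leave room for two maximum-sized frames before flow control kicks in. Below is a minimal user-space sketch of that calculation only; struct pause_thresh, calc_pause() and the sample buffer size are illustrative names and values, not part of the driver.

/*
 * Standalone sketch of the pause-threshold math used by
 * gem_init_pause_thresholds().  Hypothetical types/values for illustration.
 */
#include <stdio.h>

struct pause_thresh {
	int on;
	int off;
};

static struct pause_thresh calc_pause(int rx_fifo_sz, int rx_buf_sz)
{
	struct pause_thresh t;

	if (rx_fifo_sz <= 2 * 1024) {
		/* FIFO too small to benefit from PAUSE: disable it by
		 * putting both thresholds at the full FIFO size.
		 */
		t.on = t.off = rx_fifo_sz;
	} else {
		/* frame + FCS, padded up to a 64-byte boundary */
		int max_frame = (rx_buf_sz + 4 + 64) & ~63;

		t.off = rx_fifo_sz - 2 * max_frame;   /* assert PAUSE here */
		t.on  = t.off - max_frame;            /* release it here */
	}
	return t;
}

int main(void)
{
	/* 20 KB RX FIFO is what the driver expects on a Sun GEM;
	 * 1536 is just a plausible rx_buf_sz for a 1500-byte MTU.
	 */
	struct pause_thresh t = calc_pause(20 * 1024, 1536);

	printf("pause off at %d bytes, back on at %d bytes\n", t.off, t.on);
	return 0;
}

With those inputs the sketch prints an off threshold of 17280 bytes and an on threshold of 15680 bytes, matching the "leave two frames of headroom" intent described in the driver comment.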
// SPDX-License-Identifier: GPL-2.0 /* ldmvsw.c: Sun4v LDOM Virtual Switch Driver. * * Copyright (C) 2016-2017 Oracle. All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/highmem.h> #include <linux/if_vlan.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/types.h> #if defined(CONFIG_IPV6) #include <linux/icmpv6.h> #endif #include <net/ip.h> #include <net/icmp.h> #include <net/route.h> #include <asm/vio.h> #include <asm/ldc.h> /* This driver makes use of the common code in sunvnet_common.c */ #include "sunvnet_common.h" /* Length of time before we decide the hardware is hung, * and dev->tx_timeout() should be called to fix the problem. */ #define VSW_TX_TIMEOUT (10 * HZ) /* Static HW Addr used for the network interfaces representing vsw ports */ static u8 vsw_port_hwaddr[ETH_ALEN] = {0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; #define DRV_MODULE_NAME "ldmvsw" #define DRV_MODULE_VERSION "1.2" #define DRV_MODULE_RELDATE "March 4, 2017" static char version[] = DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; MODULE_AUTHOR("Oracle"); MODULE_DESCRIPTION("Sun4v LDOM Virtual Switch Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); /* Ordered from largest major to lowest */ static struct vio_version vsw_versions[] = { { .major = 1, .minor = 8 }, { .major = 1, .minor = 7 }, { .major = 1, .minor = 6 }, { .major = 1, .minor = 0 }, }; static void vsw_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } static u32 vsw_get_msglevel(struct net_device *dev) { struct vnet_port *port = netdev_priv(dev); return port->vp->msg_enable; } static void vsw_set_msglevel(struct net_device *dev, u32 value) { struct vnet_port *port = netdev_priv(dev); port->vp->msg_enable = value; } static const struct ethtool_ops vsw_ethtool_ops = { .get_drvinfo = vsw_get_drvinfo, .get_msglevel = vsw_get_msglevel, .set_msglevel = vsw_set_msglevel, .get_link = ethtool_op_get_link, }; static LIST_HEAD(vnet_list); static DEFINE_MUTEX(vnet_list_mutex); /* func arg to vnet_start_xmit_common() to get the proper tx port */ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb, struct net_device *dev) { struct vnet_port *port = netdev_priv(dev); return port; } static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { struct vnet_port *port = netdev_priv(dev); if (!port) return 0; return port->q_index; } /* Wrappers to common functions */ static netdev_tx_t vsw_start_xmit(struct sk_buff *skb, struct net_device *dev) { return sunvnet_start_xmit_common(skb, dev, vsw_tx_port_find); } static void vsw_set_rx_mode(struct net_device *dev) { struct vnet_port *port = netdev_priv(dev); return sunvnet_set_rx_mode_common(dev, port->vp); } static int ldmvsw_open(struct net_device *dev) { struct vnet_port *port = netdev_priv(dev); struct vio_driver_state *vio = &port->vio; /* reset the channel */ vio_link_state_change(vio, LDC_EVENT_RESET); vnet_port_reset(port); vio_port_up(vio); return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void vsw_poll_controller(struct net_device *dev) { struct vnet_port *port = netdev_priv(dev); return sunvnet_poll_controller_common(dev, port->vp); } #endif static const 
struct net_device_ops vsw_ops = { .ndo_open = ldmvsw_open, .ndo_stop = sunvnet_close_common, .ndo_set_rx_mode = vsw_set_rx_mode, .ndo_set_mac_address = sunvnet_set_mac_addr_common, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = sunvnet_tx_timeout_common, .ndo_start_xmit = vsw_start_xmit, .ndo_select_queue = vsw_select_queue, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = vsw_poll_controller, #endif }; static const char *local_mac_prop = "local-mac-address"; static const char *cfg_handle_prop = "cfg-handle"; static struct vnet *vsw_get_vnet(struct mdesc_handle *hp, u64 port_node, u64 *handle) { struct vnet *vp; struct vnet *iter; const u64 *local_mac = NULL; const u64 *cfghandle = NULL; u64 a; /* Get the parent virtual-network-switch macaddr and cfghandle */ mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) { u64 target = mdesc_arc_target(hp, a); const char *name; name = mdesc_get_property(hp, target, "name", NULL); if (!name || strcmp(name, "virtual-network-switch")) continue; local_mac = mdesc_get_property(hp, target, local_mac_prop, NULL); cfghandle = mdesc_get_property(hp, target, cfg_handle_prop, NULL); break; } if (!local_mac || !cfghandle) return ERR_PTR(-ENODEV); /* find or create associated vnet */ vp = NULL; mutex_lock(&vnet_list_mutex); list_for_each_entry(iter, &vnet_list, list) { if (iter->local_mac == *local_mac) { vp = iter; break; } } if (!vp) { vp = kzalloc(sizeof(*vp), GFP_KERNEL); if (unlikely(!vp)) { mutex_unlock(&vnet_list_mutex); return ERR_PTR(-ENOMEM); } spin_lock_init(&vp->lock); INIT_LIST_HEAD(&vp->port_list); INIT_LIST_HEAD(&vp->list); vp->local_mac = *local_mac; list_add(&vp->list, &vnet_list); } mutex_unlock(&vnet_list_mutex); *handle = (u64)*cfghandle; return vp; } static struct net_device *vsw_alloc_netdev(u8 hwaddr[], struct vio_dev *vdev, u64 handle, u64 port_id) { struct net_device *dev; struct vnet_port *port; dev = alloc_etherdev_mqs(sizeof(*port), VNET_MAX_TXQS, 1); if (!dev) return ERR_PTR(-ENOMEM); dev->needed_headroom = VNET_PACKET_SKIP + 8; dev->needed_tailroom = 8; eth_hw_addr_set(dev, hwaddr); ether_addr_copy(dev->perm_addr, dev->dev_addr); sprintf(dev->name, "vif%d.%d", (int)handle, (int)port_id); dev->netdev_ops = &vsw_ops; dev->ethtool_ops = &vsw_ethtool_ops; dev->watchdog_timeo = VSW_TX_TIMEOUT; dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG; dev->features = dev->hw_features; /* MTU range: 68 - 65535 */ dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = VNET_MAX_MTU; SET_NETDEV_DEV(dev, &vdev->dev); return dev; } static struct ldc_channel_config vsw_ldc_cfg = { .event = sunvnet_event_common, .mtu = 64, .mode = LDC_MODE_UNRELIABLE, }; static struct vio_driver_ops vsw_vio_ops = { .send_attr = sunvnet_send_attr_common, .handle_attr = sunvnet_handle_attr_common, .handshake_complete = sunvnet_handshake_complete_common, }; static const char *remote_macaddr_prop = "remote-mac-address"; static const char *id_prop = "id"; static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) { struct mdesc_handle *hp; struct vnet_port *port; unsigned long flags; struct vnet *vp; struct net_device *dev; const u64 *rmac; int len, i, err; const u64 *port_id; u64 handle; hp = mdesc_grab(); if (!hp) return -ENODEV; rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); err = -ENODEV; if (!rmac) { pr_err("Port lacks %s property\n", remote_macaddr_prop); mdesc_release(hp); return err; } port_id = mdesc_get_property(hp, vdev->mp, id_prop, NULL); err = -ENODEV; if (!port_id) { pr_err("Port lacks %s property\n", 
id_prop); mdesc_release(hp); return err; } /* Get (or create) the vnet associated with this port */ vp = vsw_get_vnet(hp, vdev->mp, &handle); if (IS_ERR(vp)) { err = PTR_ERR(vp); pr_err("Failed to get vnet for vsw-port\n"); mdesc_release(hp); return err; } mdesc_release(hp); dev = vsw_alloc_netdev(vsw_port_hwaddr, vdev, handle, *port_id); if (IS_ERR(dev)) { err = PTR_ERR(dev); pr_err("Failed to alloc netdev for vsw-port\n"); return err; } port = netdev_priv(dev); INIT_LIST_HEAD(&port->list); for (i = 0; i < ETH_ALEN; i++) port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff; port->vp = vp; port->dev = dev; port->switch_port = 1; port->tso = false; /* no tso in vsw, misbehaves in bridge */ port->tsolen = 0; /* Mark the port as belonging to ldmvsw which directs the * common code to use the net_device in the vnet_port * rather than the net_device in the vnet (which is used * by sunvnet). This bit is used by the VNET_PORT_TO_NET_DEVICE * macro. */ port->vsw = 1; err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK, vsw_versions, ARRAY_SIZE(vsw_versions), &vsw_vio_ops, dev->name); if (err) goto err_out_free_dev; err = vio_ldc_alloc(&port->vio, &vsw_ldc_cfg, port); if (err) goto err_out_free_dev; dev_set_drvdata(&vdev->dev, port); netif_napi_add(dev, &port->napi, sunvnet_poll_common); spin_lock_irqsave(&vp->lock, flags); list_add_rcu(&port->list, &vp->port_list); spin_unlock_irqrestore(&vp->lock, flags); timer_setup(&port->clean_timer, sunvnet_clean_timer_expire_common, 0); err = register_netdev(dev); if (err) { pr_err("Cannot register net device, aborting\n"); goto err_out_del_timer; } spin_lock_irqsave(&vp->lock, flags); sunvnet_port_add_txq_common(port); spin_unlock_irqrestore(&vp->lock, flags); napi_enable(&port->napi); vio_port_up(&port->vio); /* assure no carrier until we receive an LDC_EVENT_UP, * even if the vsw config script tries to force us up */ netif_carrier_off(dev); netdev_info(dev, "LDOM vsw-port %pM\n", dev->dev_addr); pr_info("%s: PORT ( remote-mac %pM%s )\n", dev->name, port->raddr, " switch-port"); return 0; err_out_del_timer: del_timer_sync(&port->clean_timer); list_del_rcu(&port->list); synchronize_rcu(); netif_napi_del(&port->napi); dev_set_drvdata(&vdev->dev, NULL); vio_ldc_free(&port->vio); err_out_free_dev: free_netdev(dev); return err; } static void vsw_port_remove(struct vio_dev *vdev) { struct vnet_port *port = dev_get_drvdata(&vdev->dev); unsigned long flags; if (port) { del_timer_sync(&port->vio.timer); del_timer_sync(&port->clean_timer); napi_disable(&port->napi); unregister_netdev(port->dev); list_del_rcu(&port->list); synchronize_rcu(); spin_lock_irqsave(&port->vp->lock, flags); sunvnet_port_rm_txq_common(port); spin_unlock_irqrestore(&port->vp->lock, flags); netif_napi_del(&port->napi); sunvnet_port_free_tx_bufs_common(port); vio_ldc_free(&port->vio); dev_set_drvdata(&vdev->dev, NULL); free_netdev(port->dev); } } static void vsw_cleanup(void) { struct vnet *vp; /* just need to free up the vnet list */ mutex_lock(&vnet_list_mutex); while (!list_empty(&vnet_list)) { vp = list_first_entry(&vnet_list, struct vnet, list); list_del(&vp->list); /* vio_unregister_driver() should have cleaned up port_list */ if (!list_empty(&vp->port_list)) pr_err("Ports not removed by VIO subsystem!\n"); kfree(vp); } mutex_unlock(&vnet_list_mutex); } static const struct vio_device_id vsw_port_match[] = { { .type = "vsw-port", }, {}, }; MODULE_DEVICE_TABLE(vio, vsw_port_match); static struct vio_driver vsw_port_driver = { .id_table = vsw_port_match, .probe = vsw_port_probe, .remove = 
vsw_port_remove, .name = "vsw_port", }; static int __init vsw_init(void) { pr_info("%s\n", version); return vio_register_driver(&vsw_port_driver); } static void __exit vsw_exit(void) { vio_unregister_driver(&vsw_port_driver); vsw_cleanup(); } module_init(vsw_init); module_exit(vsw_exit);
linux-master
drivers/net/ethernet/sun/ldmvsw.c
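Both ldmvsw.c above and sunvnet.c below unpack the peer MAC address from a 64-bit machine-description property with the expression (*rmac >> (5 - i) * 8) & 0xff, i.e. the six octets sit in the low 48 bits, most significant octet first. The following is a minimal standalone sketch of that unpacking under that assumption; mac_from_mdesc_u64() and the sample value are hypothetical.

/*
 * Sketch of the "remote-mac-address" property unpacking done in
 * vsw_port_probe()/vnet_port_probe().  The property value is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static void mac_from_mdesc_u64(uint64_t prop, uint8_t raddr[ETH_ALEN])
{
	int i;

	/* Octet 0 is the most significant of the low 48 bits. */
	for (i = 0; i < ETH_ALEN; i++)
		raddr[i] = (prop >> ((5 - i) * 8)) & 0xff;
}

int main(void)
{
	uint8_t raddr[ETH_ALEN];

	mac_from_mdesc_u64(0x0014afdeadbeefULL, raddr);   /* hypothetical value */
	printf("remote-mac %02x:%02x:%02x:%02x:%02x:%02x\n",
	       raddr[0], raddr[1], raddr[2], raddr[3], raddr[4], raddr[5]);
	return 0;
}

Run against the hypothetical value above this prints 14:af:de:ad:be:ef, which is the byte order the drivers store in port->raddr and later print with %pM.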
// SPDX-License-Identifier: GPL-2.0 /* sunvnet.c: Sun LDOM Virtual Network Driver. * * Copyright (C) 2007, 2008 David S. Miller <[email protected]> * Copyright (C) 2016-2017 Oracle. All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/mutex.h> #include <linux/highmem.h> #include <linux/if_vlan.h> #if IS_ENABLED(CONFIG_IPV6) #include <linux/icmpv6.h> #endif #include <net/ip.h> #include <net/icmp.h> #include <net/route.h> #include <asm/vio.h> #include <asm/ldc.h> #include "sunvnet_common.h" /* length of time before we decide the hardware is borked, * and dev->tx_timeout() should be called to fix the problem */ #define VNET_TX_TIMEOUT (5 * HZ) #define DRV_MODULE_NAME "sunvnet" #define DRV_MODULE_VERSION "2.0" #define DRV_MODULE_RELDATE "February 3, 2017" static char version[] = DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; MODULE_AUTHOR("David S. Miller ([email protected])"); MODULE_DESCRIPTION("Sun LDOM virtual network driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); /* Ordered from largest major to lowest */ static struct vio_version vnet_versions[] = { { .major = 1, .minor = 8 }, { .major = 1, .minor = 7 }, { .major = 1, .minor = 6 }, { .major = 1, .minor = 0 }, }; static void vnet_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } static u32 vnet_get_msglevel(struct net_device *dev) { struct vnet *vp = netdev_priv(dev); return vp->msg_enable; } static void vnet_set_msglevel(struct net_device *dev, u32 value) { struct vnet *vp = netdev_priv(dev); vp->msg_enable = value; } static const struct { const char string[ETH_GSTRING_LEN]; } ethtool_stats_keys[] = { { "rx_packets" }, { "tx_packets" }, { "rx_bytes" }, { "tx_bytes" }, { "rx_errors" }, { "tx_errors" }, { "rx_dropped" }, { "tx_dropped" }, { "multicast" }, { "rx_length_errors" }, { "rx_frame_errors" }, { "rx_missed_errors" }, { "tx_carrier_errors" }, { "nports" }, }; static int vnet_get_sset_count(struct net_device *dev, int sset) { struct vnet *vp = (struct vnet *)netdev_priv(dev); switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(ethtool_stats_keys) + (NUM_VNET_PORT_STATS * vp->nports); default: return -EOPNOTSUPP; } } static void vnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { struct vnet *vp = (struct vnet *)netdev_priv(dev); struct vnet_port *port; char *p = (char *)buf; switch (stringset) { case ETH_SS_STATS: memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); p += sizeof(ethtool_stats_keys); rcu_read_lock(); list_for_each_entry_rcu(port, &vp->port_list, list) { snprintf(p, ETH_GSTRING_LEN, "p%u.%s-%pM", port->q_index, port->switch_port ? 
"s" : "q", port->raddr); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, "p%u.rx_packets", port->q_index); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, "p%u.tx_packets", port->q_index); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, "p%u.rx_bytes", port->q_index); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, "p%u.tx_bytes", port->q_index); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, "p%u.event_up", port->q_index); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, "p%u.event_reset", port->q_index); p += ETH_GSTRING_LEN; } rcu_read_unlock(); break; default: WARN_ON(1); break; } } static void vnet_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *data) { struct vnet *vp = (struct vnet *)netdev_priv(dev); struct vnet_port *port; int i = 0; data[i++] = dev->stats.rx_packets; data[i++] = dev->stats.tx_packets; data[i++] = dev->stats.rx_bytes; data[i++] = dev->stats.tx_bytes; data[i++] = dev->stats.rx_errors; data[i++] = dev->stats.tx_errors; data[i++] = dev->stats.rx_dropped; data[i++] = dev->stats.tx_dropped; data[i++] = dev->stats.multicast; data[i++] = dev->stats.rx_length_errors; data[i++] = dev->stats.rx_frame_errors; data[i++] = dev->stats.rx_missed_errors; data[i++] = dev->stats.tx_carrier_errors; data[i++] = vp->nports; rcu_read_lock(); list_for_each_entry_rcu(port, &vp->port_list, list) { data[i++] = port->q_index; data[i++] = port->stats.rx_packets; data[i++] = port->stats.tx_packets; data[i++] = port->stats.rx_bytes; data[i++] = port->stats.tx_bytes; data[i++] = port->stats.event_up; data[i++] = port->stats.event_reset; } rcu_read_unlock(); } static const struct ethtool_ops vnet_ethtool_ops = { .get_drvinfo = vnet_get_drvinfo, .get_msglevel = vnet_get_msglevel, .set_msglevel = vnet_set_msglevel, .get_link = ethtool_op_get_link, .get_sset_count = vnet_get_sset_count, .get_strings = vnet_get_strings, .get_ethtool_stats = vnet_get_ethtool_stats, }; static LIST_HEAD(vnet_list); static DEFINE_MUTEX(vnet_list_mutex); static struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) { unsigned int hash = vnet_hashfn(skb->data); struct hlist_head *hp = &vp->port_hash[hash]; struct vnet_port *port; hlist_for_each_entry_rcu(port, hp, hash) { if (!sunvnet_port_is_up_common(port)) continue; if (ether_addr_equal(port->raddr, skb->data)) return port; } list_for_each_entry_rcu(port, &vp->port_list, list) { if (!port->switch_port) continue; if (!sunvnet_port_is_up_common(port)) continue; return port; } return NULL; } /* func arg to vnet_start_xmit_common() to get the proper tx port */ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb, struct net_device *dev) { struct vnet *vp = netdev_priv(dev); return __tx_port_find(vp, skb); } static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { struct vnet *vp = netdev_priv(dev); struct vnet_port *port = __tx_port_find(vp, skb); if (!port) return 0; return port->q_index; } /* Wrappers to common functions */ static netdev_tx_t vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) { return sunvnet_start_xmit_common(skb, dev, vnet_tx_port_find); } static void vnet_set_rx_mode(struct net_device *dev) { struct vnet *vp = netdev_priv(dev); return sunvnet_set_rx_mode_common(dev, vp); } #ifdef CONFIG_NET_POLL_CONTROLLER static void vnet_poll_controller(struct net_device *dev) { struct vnet *vp = netdev_priv(dev); return sunvnet_poll_controller_common(dev, vp); } #endif static const struct net_device_ops vnet_ops = { .ndo_open = 
sunvnet_open_common, .ndo_stop = sunvnet_close_common, .ndo_set_rx_mode = vnet_set_rx_mode, .ndo_set_mac_address = sunvnet_set_mac_addr_common, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = sunvnet_tx_timeout_common, .ndo_start_xmit = vnet_start_xmit, .ndo_select_queue = vnet_select_queue, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = vnet_poll_controller, #endif }; static struct vnet *vnet_new(const u64 *local_mac, struct vio_dev *vdev) { struct net_device *dev; u8 addr[ETH_ALEN]; struct vnet *vp; int err, i; dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1); if (!dev) return ERR_PTR(-ENOMEM); dev->needed_headroom = VNET_PACKET_SKIP + 8; dev->needed_tailroom = 8; for (i = 0; i < ETH_ALEN; i++) addr[i] = (*local_mac >> (5 - i) * 8) & 0xff; eth_hw_addr_set(dev, addr); vp = netdev_priv(dev); spin_lock_init(&vp->lock); vp->dev = dev; INIT_LIST_HEAD(&vp->port_list); for (i = 0; i < VNET_PORT_HASH_SIZE; i++) INIT_HLIST_HEAD(&vp->port_hash[i]); INIT_LIST_HEAD(&vp->list); vp->local_mac = *local_mac; dev->netdev_ops = &vnet_ops; dev->ethtool_ops = &vnet_ethtool_ops; dev->watchdog_timeo = VNET_TX_TIMEOUT; dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_HW_CSUM | NETIF_F_SG; dev->features = dev->hw_features; /* MTU range: 68 - 65535 */ dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = VNET_MAX_MTU; SET_NETDEV_DEV(dev, &vdev->dev); err = register_netdev(dev); if (err) { pr_err("Cannot register net device, aborting\n"); goto err_out_free_dev; } netdev_info(dev, "Sun LDOM vnet %pM\n", dev->dev_addr); list_add(&vp->list, &vnet_list); return vp; err_out_free_dev: free_netdev(dev); return ERR_PTR(err); } static struct vnet *vnet_find_or_create(const u64 *local_mac, struct vio_dev *vdev) { struct vnet *iter, *vp; mutex_lock(&vnet_list_mutex); vp = NULL; list_for_each_entry(iter, &vnet_list, list) { if (iter->local_mac == *local_mac) { vp = iter; break; } } if (!vp) vp = vnet_new(local_mac, vdev); mutex_unlock(&vnet_list_mutex); return vp; } static void vnet_cleanup(void) { struct vnet *vp; struct net_device *dev; mutex_lock(&vnet_list_mutex); while (!list_empty(&vnet_list)) { vp = list_first_entry(&vnet_list, struct vnet, list); list_del(&vp->list); dev = vp->dev; /* vio_unregister_driver() should have cleaned up port_list */ BUG_ON(!list_empty(&vp->port_list)); unregister_netdev(dev); free_netdev(dev); } mutex_unlock(&vnet_list_mutex); } static const char *local_mac_prop = "local-mac-address"; static struct vnet *vnet_find_parent(struct mdesc_handle *hp, u64 port_node, struct vio_dev *vdev) { const u64 *local_mac = NULL; u64 a; mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) { u64 target = mdesc_arc_target(hp, a); const char *name; name = mdesc_get_property(hp, target, "name", NULL); if (!name || strcmp(name, "network")) continue; local_mac = mdesc_get_property(hp, target, local_mac_prop, NULL); if (local_mac) break; } if (!local_mac) return ERR_PTR(-ENODEV); return vnet_find_or_create(local_mac, vdev); } static struct ldc_channel_config vnet_ldc_cfg = { .event = sunvnet_event_common, .mtu = 64, .mode = LDC_MODE_UNRELIABLE, }; static struct vio_driver_ops vnet_vio_ops = { .send_attr = sunvnet_send_attr_common, .handle_attr = sunvnet_handle_attr_common, .handshake_complete = sunvnet_handshake_complete_common, }; const char *remote_macaddr_prop = "remote-mac-address"; static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) { struct mdesc_handle *hp; struct vnet_port *port; unsigned long flags; struct vnet *vp; const u64 *rmac; 
int len, i, err, switch_port; hp = mdesc_grab(); if (!hp) return -ENODEV; vp = vnet_find_parent(hp, vdev->mp, vdev); if (IS_ERR(vp)) { pr_err("Cannot find port parent vnet\n"); err = PTR_ERR(vp); goto err_out_put_mdesc; } rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); err = -ENODEV; if (!rmac) { pr_err("Port lacks %s property\n", remote_macaddr_prop); goto err_out_put_mdesc; } port = kzalloc(sizeof(*port), GFP_KERNEL); err = -ENOMEM; if (!port) goto err_out_put_mdesc; for (i = 0; i < ETH_ALEN; i++) port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff; port->vp = vp; err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK, vnet_versions, ARRAY_SIZE(vnet_versions), &vnet_vio_ops, vp->dev->name); if (err) goto err_out_free_port; err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port); if (err) goto err_out_free_port; netif_napi_add(port->vp->dev, &port->napi, sunvnet_poll_common); INIT_HLIST_NODE(&port->hash); INIT_LIST_HEAD(&port->list); switch_port = 0; if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL)) switch_port = 1; port->switch_port = switch_port; port->tso = true; port->tsolen = 0; spin_lock_irqsave(&vp->lock, flags); if (switch_port) list_add_rcu(&port->list, &vp->port_list); else list_add_tail_rcu(&port->list, &vp->port_list); hlist_add_head_rcu(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]); sunvnet_port_add_txq_common(port); spin_unlock_irqrestore(&vp->lock, flags); dev_set_drvdata(&vdev->dev, port); pr_info("%s: PORT ( remote-mac %pM%s )\n", vp->dev->name, port->raddr, switch_port ? " switch-port" : ""); timer_setup(&port->clean_timer, sunvnet_clean_timer_expire_common, 0); napi_enable(&port->napi); vio_port_up(&port->vio); mdesc_release(hp); return 0; err_out_free_port: kfree(port); err_out_put_mdesc: mdesc_release(hp); return err; } static void vnet_port_remove(struct vio_dev *vdev) { struct vnet_port *port = dev_get_drvdata(&vdev->dev); if (port) { del_timer_sync(&port->vio.timer); napi_disable(&port->napi); list_del_rcu(&port->list); hlist_del_rcu(&port->hash); synchronize_rcu(); timer_shutdown_sync(&port->clean_timer); sunvnet_port_rm_txq_common(port); netif_napi_del(&port->napi); sunvnet_port_free_tx_bufs_common(port); vio_ldc_free(&port->vio); dev_set_drvdata(&vdev->dev, NULL); kfree(port); } } static const struct vio_device_id vnet_port_match[] = { { .type = "vnet-port", }, {}, }; MODULE_DEVICE_TABLE(vio, vnet_port_match); static struct vio_driver vnet_port_driver = { .id_table = vnet_port_match, .probe = vnet_port_probe, .remove = vnet_port_remove, .name = "vnet_port", }; static int __init vnet_init(void) { pr_info("%s\n", version); return vio_register_driver(&vnet_port_driver); } static void __exit vnet_exit(void) { vio_unregister_driver(&vnet_port_driver); vnet_cleanup(); } module_init(vnet_init); module_exit(vnet_exit);
linux-master
drivers/net/ethernet/sun/sunvnet.c
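sunvnet's __tx_port_find() above selects the egress port by matching the frame's destination MAC (the first six bytes of skb->data) against each peer port, and falls back to the first live switch port for anything it cannot match, such as broadcasts or destinations outside the logical domain. The sketch below shows that selection policy only, with a plain array standing in for the driver's RCU-protected hash table and port list; struct demo_port and demo_tx_port_find() are illustrative names, not driver API.

/*
 * Sketch of the transmit-port selection policy in __tx_port_find().
 * A flat array replaces the driver's port hash; names are made up.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

struct demo_port {
	uint8_t raddr[ETH_ALEN];   /* peer MAC on this LDC channel */
	bool switch_port;          /* port towards the virtual switch */
	bool up;                   /* link/handshake is up */
};

static struct demo_port *demo_tx_port_find(struct demo_port *ports, size_t n,
					   const uint8_t *frame)
{
	size_t i;

	/* Exact match on the destination MAC wins. */
	for (i = 0; i < n; i++)
		if (ports[i].up &&
		    !memcmp(ports[i].raddr, frame, ETH_ALEN))
			return &ports[i];

	/* Otherwise hand the frame to the first live switch port. */
	for (i = 0; i < n; i++)
		if (ports[i].up && ports[i].switch_port)
			return &ports[i];

	return NULL;
}

int main(void)
{
	struct demo_port ports[2] = {
		{ { 0x02, 0x04, 0x0a, 0x00, 0x00, 0x01 }, false, true },
		{ { 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff }, true,  true },
	};
	/* The first six bytes of an Ethernet frame are the destination MAC. */
	uint8_t frame[ETH_ALEN] = { 0x02, 0x04, 0x0a, 0x00, 0x00, 0x01 };
	struct demo_port *p = demo_tx_port_find(ports, 2, frame);

	return p == &ports[0] ? 0 : 1;
}

The switch-port fallback is what lets a guest reach destinations it has no direct channel to: unmatched traffic is handed to the virtual switch, which is also why ldmvsw.c marks its ports with port->switch_port = 1.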
// SPDX-License-Identifier: GPL-2.0 /* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver. * Once again I am out to prove that every ethernet * controller out there can be most efficiently programmed * if you make it look like a LANCE. * * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller ([email protected]) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/crc32.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/pgtable.h> #include <linux/platform_device.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/byteorder.h> #include <asm/idprom.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/auxio.h> #include <asm/irq.h> #include "sunqe.h" #define DRV_NAME "sunqe" #define DRV_VERSION "4.1" #define DRV_RELDATE "August 27, 2008" #define DRV_AUTHOR "David S. Miller ([email protected])" static char version[] = DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver"); MODULE_LICENSE("GPL"); static struct sunqec *root_qec_dev; static void qe_set_multicast(struct net_device *dev); #define QEC_RESET_TRIES 200 static inline int qec_global_reset(void __iomem *gregs) { int tries = QEC_RESET_TRIES; sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL); while (--tries) { u32 tmp = sbus_readl(gregs + GLOB_CTRL); if (tmp & GLOB_CTRL_RESET) { udelay(20); continue; } break; } if (tries) return 0; printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n"); return -1; } #define MACE_RESET_RETRIES 200 #define QE_RESET_RETRIES 200 static inline int qe_stop(struct sunqe *qep) { void __iomem *cregs = qep->qcregs; void __iomem *mregs = qep->mregs; int tries; /* Reset the MACE, then the QEC channel. 
*/ sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG); tries = MACE_RESET_RETRIES; while (--tries) { u8 tmp = sbus_readb(mregs + MREGS_BCONFIG); if (tmp & MREGS_BCONFIG_RESET) { udelay(20); continue; } break; } if (!tries) { printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n"); return -1; } sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL); tries = QE_RESET_RETRIES; while (--tries) { u32 tmp = sbus_readl(cregs + CREG_CTRL); if (tmp & CREG_CTRL_RESET) { udelay(20); continue; } break; } if (!tries) { printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n"); return -1; } return 0; } static void qe_init_rings(struct sunqe *qep) { struct qe_init_block *qb = qep->qe_block; struct sunqe_buffers *qbufs = qep->buffers; __u32 qbufs_dvma = (__u32)qep->buffers_dvma; int i; qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0; memset(qb, 0, sizeof(struct qe_init_block)); memset(qbufs, 0, sizeof(struct sunqe_buffers)); for (i = 0; i < RX_RING_SIZE; i++) { qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i); qb->qe_rxd[i].rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH)); } } static int qe_init(struct sunqe *qep, int from_irq) { struct sunqec *qecp = qep->parent; void __iomem *cregs = qep->qcregs; void __iomem *mregs = qep->mregs; void __iomem *gregs = qecp->gregs; const unsigned char *e = &qep->dev->dev_addr[0]; __u32 qblk_dvma = (__u32)qep->qblock_dvma; u32 tmp; int i; /* Shut it up. */ if (qe_stop(qep)) return -EAGAIN; /* Setup initial rx/tx init block pointers. */ sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); /* Enable/mask the various irq's. */ sbus_writel(0, cregs + CREG_RIMASK); sbus_writel(1, cregs + CREG_TIMASK); sbus_writel(0, cregs + CREG_QMASK); sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK); /* Setup the FIFO pointers into QEC local memory. */ tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE); sbus_writel(tmp, cregs + CREG_RXRBUFPTR); sbus_writel(tmp, cregs + CREG_RXWBUFPTR); tmp = sbus_readl(cregs + CREG_RXRBUFPTR) + sbus_readl(gregs + GLOB_RSIZE); sbus_writel(tmp, cregs + CREG_TXRBUFPTR); sbus_writel(tmp, cregs + CREG_TXWBUFPTR); /* Clear the channel collision counter. */ sbus_writel(0, cregs + CREG_CCNT); /* For 10baseT, inter frame space nor throttle seems to be necessary. */ sbus_writel(0, cregs + CREG_PIPG); /* Now dork with the AMD MACE. */ sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG); sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL); sbus_writeb(0, mregs + MREGS_RXFCNTL); /* The QEC dma's the rx'd packets from local memory out to main memory, * and therefore it interrupts when the packet reception is "complete". * So don't listen for the MACE talking about it. */ sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK); sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG); sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 | MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU), mregs + MREGS_FCONFIG); /* Only usable interface on QuadEther is twisted pair. */ sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG); /* Tell MACE we are changing the ether address. 
*/ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET, mregs + MREGS_IACONFIG); while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) barrier(); sbus_writeb(e[0], mregs + MREGS_ETHADDR); sbus_writeb(e[1], mregs + MREGS_ETHADDR); sbus_writeb(e[2], mregs + MREGS_ETHADDR); sbus_writeb(e[3], mregs + MREGS_ETHADDR); sbus_writeb(e[4], mregs + MREGS_ETHADDR); sbus_writeb(e[5], mregs + MREGS_ETHADDR); /* Clear out the address filter. */ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, mregs + MREGS_IACONFIG); while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) barrier(); for (i = 0; i < 8; i++) sbus_writeb(0, mregs + MREGS_FILTER); /* Address changes are now complete. */ sbus_writeb(0, mregs + MREGS_IACONFIG); qe_init_rings(qep); /* Wait a little bit for the link to come up... */ mdelay(5); if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) { int tries = 50; while (--tries) { u8 tmp; mdelay(5); barrier(); tmp = sbus_readb(mregs + MREGS_PHYCONFIG); if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0) break; } if (tries == 0) printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name); } /* Missed packet counter is cleared on a read. */ sbus_readb(mregs + MREGS_MPCNT); /* Reload multicast information, this will enable the receiver * and transmitter. */ qe_set_multicast(qep->dev); /* QEC should now start to show interrupts. */ return 0; } /* Grrr, certain error conditions completely lock up the AMD MACE, * so when we get these we _must_ reset the chip. */ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status) { struct net_device *dev = qep->dev; int mace_hwbug_workaround = 0; if (qe_status & CREG_STAT_EDEFER) { printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name); dev->stats.tx_errors++; } if (qe_status & CREG_STAT_CLOSS) { printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name); dev->stats.tx_errors++; dev->stats.tx_carrier_errors++; } if (qe_status & CREG_STAT_ERETRIES) { printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name); dev->stats.tx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_LCOLL) { printk(KERN_ERR "%s: Late transmit collision.\n", dev->name); dev->stats.tx_errors++; dev->stats.collisions++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_FUFLOW) { printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name); dev->stats.tx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_JERROR) { printk(KERN_ERR "%s: Jabber error.\n", dev->name); } if (qe_status & CREG_STAT_BERROR) { printk(KERN_ERR "%s: Babble error.\n", dev->name); } if (qe_status & CREG_STAT_CCOFLOW) { dev->stats.tx_errors += 256; dev->stats.collisions += 256; } if (qe_status & CREG_STAT_TXDERROR) { printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name); dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_TXLERR) { printk(KERN_ERR "%s: Transmit late error.\n", dev->name); dev->stats.tx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_TXPERR) { printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name); dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_TXSERR) { printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name); dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_RCCOFLOW) { dev->stats.rx_errors += 256; dev->stats.collisions 
+= 256; } if (qe_status & CREG_STAT_RUOFLOW) { dev->stats.rx_errors += 256; dev->stats.rx_over_errors += 256; } if (qe_status & CREG_STAT_MCOFLOW) { dev->stats.rx_errors += 256; dev->stats.rx_missed_errors += 256; } if (qe_status & CREG_STAT_RXFOFLOW) { printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name); dev->stats.rx_errors++; dev->stats.rx_over_errors++; } if (qe_status & CREG_STAT_RLCOLL) { printk(KERN_ERR "%s: Late receive collision.\n", dev->name); dev->stats.rx_errors++; dev->stats.collisions++; } if (qe_status & CREG_STAT_FCOFLOW) { dev->stats.rx_errors += 256; dev->stats.rx_frame_errors += 256; } if (qe_status & CREG_STAT_CECOFLOW) { dev->stats.rx_errors += 256; dev->stats.rx_crc_errors += 256; } if (qe_status & CREG_STAT_RXDROP) { printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name); dev->stats.rx_errors++; dev->stats.rx_dropped++; dev->stats.rx_missed_errors++; } if (qe_status & CREG_STAT_RXSMALL) { printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name); dev->stats.rx_errors++; dev->stats.rx_length_errors++; } if (qe_status & CREG_STAT_RXLERR) { printk(KERN_ERR "%s: Receive late error.\n", dev->name); dev->stats.rx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_RXPERR) { printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name); dev->stats.rx_errors++; dev->stats.rx_missed_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_RXSERR) { printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name); dev->stats.rx_errors++; dev->stats.rx_missed_errors++; mace_hwbug_workaround = 1; } if (mace_hwbug_workaround) qe_init(qep, 1); return mace_hwbug_workaround; } /* Per-QE receive interrupt service routine. Just like on the happy meal * we receive directly into skb's with a small packet copy water mark. */ static void qe_rx(struct sunqe *qep) { struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0]; struct net_device *dev = qep->dev; struct qe_rxd *this; struct sunqe_buffers *qbufs = qep->buffers; __u32 qbufs_dvma = (__u32)qep->buffers_dvma; int elem = qep->rx_new; u32 flags; this = &rxbase[elem]; while (!((flags = this->rx_flags) & RXD_OWN)) { struct sk_buff *skb; unsigned char *this_qbuf = &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0]; __u32 this_qbuf_dvma = qbufs_dvma + qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1))); struct qe_rxd *end_rxd = &rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)]; int len = (flags & RXD_LENGTH) - 4; /* QE adds ether FCS size to len */ /* Check for errors. */ if (len < ETH_ZLEN) { dev->stats.rx_errors++; dev->stats.rx_length_errors++; dev->stats.rx_dropped++; } else { skb = netdev_alloc_skb(dev, len + 2); if (skb == NULL) { dev->stats.rx_dropped++; } else { skb_reserve(skb, 2); skb_put(skb, len); skb_copy_to_linear_data(skb, this_qbuf, len); skb->protocol = eth_type_trans(skb, qep->dev); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; } } end_rxd->rx_addr = this_qbuf_dvma; end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH)); elem = NEXT_RX(elem); this = &rxbase[elem]; } qep->rx_new = elem; } static void qe_tx_reclaim(struct sunqe *qep); /* Interrupts for all QE's get filtered out via the QEC master controller, * so we just run through each qe and check to see who is signaling * and thus needs to be serviced. */ static irqreturn_t qec_interrupt(int irq, void *dev_id) { struct sunqec *qecp = dev_id; u32 qec_status; int channel = 0; /* Latch the status now. 
*/ qec_status = sbus_readl(qecp->gregs + GLOB_STAT); while (channel < 4) { if (qec_status & 0xf) { struct sunqe *qep = qecp->qes[channel]; u32 qe_status; qe_status = sbus_readl(qep->qcregs + CREG_STAT); if (qe_status & CREG_STAT_ERRORS) { if (qe_is_bolixed(qep, qe_status)) goto next; } if (qe_status & CREG_STAT_RXIRQ) qe_rx(qep); if (netif_queue_stopped(qep->dev) && (qe_status & CREG_STAT_TXIRQ)) { spin_lock(&qep->lock); qe_tx_reclaim(qep); if (TX_BUFFS_AVAIL(qep) > 0) { /* Wake net queue and return to * lazy tx reclaim. */ netif_wake_queue(qep->dev); sbus_writel(1, qep->qcregs + CREG_TIMASK); } spin_unlock(&qep->lock); } next: ; } qec_status >>= 4; channel++; } return IRQ_HANDLED; } static int qe_open(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); qep->mconfig = (MREGS_MCONFIG_TXENAB | MREGS_MCONFIG_RXENAB | MREGS_MCONFIG_MBAENAB); return qe_init(qep, 0); } static int qe_close(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); qe_stop(qep); return 0; } /* Reclaim TX'd frames from the ring. This must always run under * the IRQ protected qep->lock. */ static void qe_tx_reclaim(struct sunqe *qep) { struct qe_txd *txbase = &qep->qe_block->qe_txd[0]; int elem = qep->tx_old; while (elem != qep->tx_new) { u32 flags = txbase[elem].tx_flags; if (flags & TXD_OWN) break; elem = NEXT_TX(elem); } qep->tx_old = elem; } static void qe_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct sunqe *qep = netdev_priv(dev); int tx_full; spin_lock_irq(&qep->lock); /* Try to reclaim, if that frees up some tx * entries, we're fine. */ qe_tx_reclaim(qep); tx_full = TX_BUFFS_AVAIL(qep) <= 0; spin_unlock_irq(&qep->lock); if (! tx_full) goto out; printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); qe_init(qep, 1); out: netif_wake_queue(dev); } /* Get a packet queued to go onto the wire. */ static netdev_tx_t qe_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); struct sunqe_buffers *qbufs = qep->buffers; __u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma; unsigned char *txbuf; int len, entry; spin_lock_irq(&qep->lock); qe_tx_reclaim(qep); len = skb->len; entry = qep->tx_new; txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0]; txbuf_dvma = qbufs_dvma + qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1))); /* Avoid a race... */ qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE; skb_copy_from_linear_data(skb, txbuf, len); qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma; qep->qe_block->qe_txd[entry].tx_flags = (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH)); qep->tx_new = NEXT_TX(entry); /* Get it going. */ sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL); dev->stats.tx_packets++; dev->stats.tx_bytes += len; if (TX_BUFFS_AVAIL(qep) <= 0) { /* Halt the net queue and enable tx interrupts. * When the tx queue empties the tx irq handler * will wake up the queue and return us back to * the lazy tx reclaim scheme. */ netif_stop_queue(dev); sbus_writel(0, qep->qcregs + CREG_TIMASK); } spin_unlock_irq(&qep->lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } static void qe_set_multicast(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); struct netdev_hw_addr *ha; u8 new_mconfig = qep->mconfig; int i; u32 crc; /* Lock out others. 
*/ netif_stop_queue(dev); if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, qep->mregs + MREGS_IACONFIG); while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) barrier(); for (i = 0; i < 8; i++) sbus_writeb(0xff, qep->mregs + MREGS_FILTER); sbus_writeb(0, qep->mregs + MREGS_IACONFIG); } else if (dev->flags & IFF_PROMISC) { new_mconfig |= MREGS_MCONFIG_PROMISC; } else { u16 hash_table[4]; u8 *hbytes = (unsigned char *) &hash_table[0]; memset(hash_table, 0, sizeof(hash_table)); netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); crc >>= 26; hash_table[crc >> 4] |= 1 << (crc & 0xf); } /* Program the qe with the new filter value. */ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, qep->mregs + MREGS_IACONFIG); while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) barrier(); for (i = 0; i < 8; i++) { u8 tmp = *hbytes++; sbus_writeb(tmp, qep->mregs + MREGS_FILTER); } sbus_writeb(0, qep->mregs + MREGS_IACONFIG); } /* Any change of the logical address filter, the physical address, * or enabling/disabling promiscuous mode causes the MACE to disable * the receiver. So we must re-enable them here or else the MACE * refuses to listen to anything on the network. Sheesh, took * me a day or two to find this bug. */ qep->mconfig = new_mconfig; sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG); /* Let us get going again. */ netif_wake_queue(dev); } /* Ethtool support... */ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { const struct linux_prom_registers *regs; struct sunqe *qep = netdev_priv(dev); struct platform_device *op; strscpy(info->driver, "sunqe", sizeof(info->driver)); strscpy(info->version, "3.0", sizeof(info->version)); op = qep->op; regs = of_get_property(op->dev.of_node, "reg", NULL); if (regs) snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d", regs->which_io); } static u32 qe_get_link(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); void __iomem *mregs = qep->mregs; u8 phyconfig; spin_lock_irq(&qep->lock); phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG); spin_unlock_irq(&qep->lock); return phyconfig & MREGS_PHYCONFIG_LSTAT; } static const struct ethtool_ops qe_ethtool_ops = { .get_drvinfo = qe_get_drvinfo, .get_link = qe_get_link, }; /* This is only called once at boot time for each card probed. */ static void qec_init_once(struct sunqec *qecp, struct platform_device *op) { u8 bsizes = qecp->qec_bursts; if (sbus_can_burst64() && (bsizes & DMA_BURST64)) { sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL); } else if (bsizes & DMA_BURST32) { sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL); } else { sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL); } /* Packetsize only used in 100baseT BigMAC configurations, * set it to zero just to be on the safe side. */ sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE); /* Set the local memsize register, divided up to one piece per QE channel. */ sbus_writel((resource_size(&op->resource[1]) >> 2), qecp->gregs + GLOB_MSIZE); /* Divide up the local QEC memory amongst the 4 QE receiver and * transmitter FIFOs. Basically it is (total / 2 / num_channels). 
*/ sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1, qecp->gregs + GLOB_TSIZE); sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1, qecp->gregs + GLOB_RSIZE); } static u8 qec_get_burst(struct device_node *dp) { u8 bsizes, bsizes_more; /* Find and set the burst sizes for the QEC, since it * does the actual dma for all 4 channels. */ bsizes = of_getintprop_default(dp, "burst-sizes", 0xff); bsizes &= 0xff; bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff); if (bsizes_more != 0xff) bsizes &= bsizes_more; if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 || (bsizes & DMA_BURST32)==0) bsizes = (DMA_BURST32 - 1); return bsizes; } static struct sunqec *get_qec(struct platform_device *child) { struct platform_device *op = to_platform_device(child->dev.parent); struct sunqec *qecp; qecp = platform_get_drvdata(op); if (!qecp) { qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL); if (qecp) { u32 ctrl; qecp->op = op; qecp->gregs = of_ioremap(&op->resource[0], 0, GLOB_REG_SIZE, "QEC Global Registers"); if (!qecp->gregs) goto fail; /* Make sure the QEC is in MACE mode. */ ctrl = sbus_readl(qecp->gregs + GLOB_CTRL); ctrl &= 0xf0000000; if (ctrl != GLOB_CTRL_MMODE) { printk(KERN_ERR "qec: Not in MACE mode!\n"); goto fail; } if (qec_global_reset(qecp->gregs)) goto fail; qecp->qec_bursts = qec_get_burst(op->dev.of_node); qec_init_once(qecp, op); if (request_irq(op->archdata.irqs[0], qec_interrupt, IRQF_SHARED, "qec", (void *) qecp)) { printk(KERN_ERR "qec: Can't register irq.\n"); goto fail; } platform_set_drvdata(op, qecp); qecp->next_module = root_qec_dev; root_qec_dev = qecp; } } return qecp; fail: if (qecp->gregs) of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE); kfree(qecp); return NULL; } static const struct net_device_ops qec_ops = { .ndo_open = qe_open, .ndo_stop = qe_close, .ndo_start_xmit = qe_start_xmit, .ndo_set_rx_mode = qe_set_multicast, .ndo_tx_timeout = qe_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int qec_ether_init(struct platform_device *op) { static unsigned version_printed; struct net_device *dev; struct sunqec *qecp; struct sunqe *qe; int i, res; if (version_printed++ == 0) printk(KERN_INFO "%s", version); dev = alloc_etherdev(sizeof(struct sunqe)); if (!dev) return -ENOMEM; eth_hw_addr_set(dev, idprom->id_ethaddr); qe = netdev_priv(dev); res = -ENODEV; i = of_getintprop_default(op->dev.of_node, "channel#", -1); if (i == -1) goto fail; qe->channel = i; spin_lock_init(&qe->lock); qecp = get_qec(op); if (!qecp) goto fail; qecp->qes[qe->channel] = qe; qe->dev = dev; qe->parent = qecp; qe->op = op; res = -ENOMEM; qe->qcregs = of_ioremap(&op->resource[0], 0, CREG_REG_SIZE, "QEC Channel Registers"); if (!qe->qcregs) { printk(KERN_ERR "qe: Cannot map channel registers.\n"); goto fail; } qe->mregs = of_ioremap(&op->resource[1], 0, MREGS_REG_SIZE, "QE MACE Registers"); if (!qe->mregs) { printk(KERN_ERR "qe: Cannot map MACE registers.\n"); goto fail; } qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE, &qe->qblock_dvma, GFP_ATOMIC); qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers), &qe->buffers_dvma, GFP_ATOMIC); if (qe->qe_block == NULL || qe->qblock_dvma == 0 || qe->buffers == NULL || qe->buffers_dvma == 0) goto fail; /* Stop this QE. 
*/ qe_stop(qe); SET_NETDEV_DEV(dev, &op->dev); dev->watchdog_timeo = 5*HZ; dev->irq = op->archdata.irqs[0]; dev->dma = 0; dev->ethtool_ops = &qe_ethtool_ops; dev->netdev_ops = &qec_ops; res = register_netdev(dev); if (res) goto fail; platform_set_drvdata(op, qe); printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel, dev->dev_addr); return 0; fail: if (qe->qcregs) of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE); if (qe->mregs) of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE); if (qe->qe_block) dma_free_coherent(&op->dev, PAGE_SIZE, qe->qe_block, qe->qblock_dvma); if (qe->buffers) dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers), qe->buffers, qe->buffers_dvma); free_netdev(dev); return res; } static int qec_sbus_probe(struct platform_device *op) { return qec_ether_init(op); } static int qec_sbus_remove(struct platform_device *op) { struct sunqe *qp = platform_get_drvdata(op); struct net_device *net_dev = qp->dev; unregister_netdev(net_dev); of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE); of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE); dma_free_coherent(&op->dev, PAGE_SIZE, qp->qe_block, qp->qblock_dvma); dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers), qp->buffers, qp->buffers_dvma); free_netdev(net_dev); return 0; } static const struct of_device_id qec_sbus_match[] = { { .name = "qe", }, {}, }; MODULE_DEVICE_TABLE(of, qec_sbus_match); static struct platform_driver qec_sbus_driver = { .driver = { .name = "qec", .of_match_table = qec_sbus_match, }, .probe = qec_sbus_probe, .remove = qec_sbus_remove, }; static int __init qec_init(void) { return platform_driver_register(&qec_sbus_driver); } static void __exit qec_exit(void) { platform_driver_unregister(&qec_sbus_driver); while (root_qec_dev) { struct sunqec *next = root_qec_dev->next_module; struct platform_device *op = root_qec_dev->op; free_irq(op->archdata.irqs[0], (void *) root_qec_dev); of_iounmap(&op->resource[0], root_qec_dev->gregs, GLOB_REG_SIZE); kfree(root_qec_dev); root_qec_dev = next; } } module_init(qec_init); module_exit(qec_exit);
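The multicast path in qe_set_multicast() above condenses each group address into one of 64 logical-address-filter bits via a little-endian CRC-32. Below is a minimal, standalone sketch of that hash (plain user-space C, not driver code); crc32_le_mac() is a bit-serial stand-in written here only so the example compiles on its own — it is assumed to match the kernel's ether_crc_le(), and the MREGS_FILTER programming itself is only described in a comment.

/* Illustrative sketch of the 64-bit multicast hash used by qe_set_multicast(). */
#include <stdint.h>
#include <stdio.h>

/* Bit-serial CRC-32, reflected polynomial, no final inversion (ether_crc_le-style). */
static uint32_t crc32_le_mac(const uint8_t *data, int len)
{
	uint32_t crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

/* Set the filter bit for one multicast address, exactly as the driver does. */
static void qe_hash_addr(uint16_t hash_table[4], const uint8_t mac[6])
{
	uint32_t crc = crc32_le_mac(mac, 6);

	crc >>= 26;				/* keep the top 6 bits -> 0..63    */
	hash_table[crc >> 4] |= 1 << (crc & 0xf);	/* word 0..3, bit 0..15 */
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint16_t hash_table[4] = { 0, 0, 0, 0 };

	qe_hash_addr(hash_table, mac);
	/* The driver would then write these four words, byte by byte, to MREGS_FILTER. */
	printf("%04x %04x %04x %04x\n",
	       hash_table[0], hash_table[1], hash_table[2], hash_table[3]);
	return 0;
}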
linux-master
drivers/net/ethernet/sun/sunqe.c
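Before the next file begins, here is a minimal, standalone model of the lazy TX-reclaim bookkeeping that qe_start_xmit() and qe_tx_reclaim() above rely on: the transmit path only advances tx_new, the reclaim path only advances tx_old and stops at the first descriptor the hardware still owns. The structures and the tx_buffs_avail() helper are simplified stand-ins assumed to mirror the driver's ring arithmetic, not the driver's own definitions.

/* Self-contained model (not driver code) of the sunqe lazy TX-reclaim scheme. */
#include <stdbool.h>
#include <stdio.h>

#define TX_RING_SIZE	16
#define NEXT_TX(i)	(((i) + 1) & (TX_RING_SIZE - 1))
#define TXD_OWN		0x80000000u

struct model_txd {
	unsigned int flags;		/* TXD_OWN while the "hardware" owns the slot */
};

struct model_qe {
	struct model_txd txd[TX_RING_SIZE];
	int tx_new;			/* next slot the driver will fill     */
	int tx_old;			/* oldest slot not yet reclaimed      */
};

/* Free slots seen by the driver; one slot is always kept empty. */
static int tx_buffs_avail(const struct model_qe *qe)
{
	return (qe->tx_old <= qe->tx_new)
		? qe->tx_old + (TX_RING_SIZE - 1) - qe->tx_new
		: qe->tx_old - qe->tx_new - 1;
}

/* qe_start_xmit() analogue: hand a slot to the hardware, advance tx_new. */
static bool model_xmit(struct model_qe *qe)
{
	if (tx_buffs_avail(qe) <= 0)
		return false;		/* the real driver stops the net queue here */
	qe->txd[qe->tx_new].flags = TXD_OWN;
	qe->tx_new = NEXT_TX(qe->tx_new);
	return true;
}

/* qe_tx_reclaim() analogue: walk forward until a descriptor is still owned. */
static void model_reclaim(struct model_qe *qe)
{
	while (qe->tx_old != qe->tx_new &&
	       !(qe->txd[qe->tx_old].flags & TXD_OWN))
		qe->tx_old = NEXT_TX(qe->tx_old);
}

int main(void)
{
	struct model_qe qe = { .tx_new = 0, .tx_old = 0 };
	int i;

	for (i = 0; i < 5; i++)
		model_xmit(&qe);
	for (i = 0; i < 3; i++)		/* pretend hardware finished three frames */
		qe.txd[i].flags &= ~TXD_OWN;
	model_reclaim(&qe);
	printf("tx_old=%d tx_new=%d avail=%d\n",
	       qe.tx_old, qe.tx_new, tx_buffs_avail(&qe));
	return 0;
}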
// SPDX-License-Identifier: GPL-2.0+ /* cassini.c: Sun Microsystems Cassini(+) ethernet driver. * * Copyright (C) 2004 Sun Microsystems Inc. * Copyright (C) 2003 Adrian Sun ([email protected]) * * This driver uses the sungem driver (c) David Miller * ([email protected]) as its basis. * * The cassini chip has a number of features that distinguish it from * the gem chip: * 4 transmit descriptor rings that are used for either QoS (VLAN) or * load balancing (non-VLAN mode) * batching of multiple packets * multiple CPU dispatching * page-based RX descriptor engine with separate completion rings * Gigabit support (GMII and PCS interface) * MIF link up/down detection works * * RX is handled by page sized buffers that are attached as fragments to * the skb. here's what's done: * -- driver allocates pages at a time and keeps reference counts * on them. * -- the upper protocol layers assume that the header is in the skb * itself. as a result, cassini will copy a small amount (64 bytes) * to make them happy. * -- driver appends the rest of the data pages as frags to skbuffs * and increments the reference count * -- on page reclamation, the driver swaps the page with a spare page. * if that page is still in use, it frees its reference to that page, * and allocates a new page for use. otherwise, it just recycles the * page. * * NOTE: cassini can parse the header. however, it's not worth it * as long as the network stack requires a header copy. * * TX has 4 queues. currently these queues are used in a round-robin * fashion for load balancing. They can also be used for QoS. for that * to work, however, QoS information needs to be exposed down to the driver * level so that subqueues get targeted to particular transmit rings. * alternatively, the queues can be configured via use of the all-purpose * ioctl. * * RX DATA: the rx completion ring has all the info, but the rx desc * ring has all of the data. RX can conceivably come in under multiple * interrupts, but the INT# assignment needs to be set up properly by * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do * that. also, the two descriptor rings are designed to distinguish between * encrypted and non-encrypted packets, but we use them for buffering * instead. * * by default, the selective clear mask is set up to process rx packets. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/compiler.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/vmalloc.h> #include <linux/ioport.h> #include <linux/pci.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/list.h> #include <linux/dma-mapping.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/random.h> #include <linux/mii.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/mutex.h> #include <linux/firmware.h> #include <net/checksum.h> #include <linux/atomic.h> #include <asm/io.h> #include <asm/byteorder.h> #include <linux/uaccess.h> #include <linux/jiffies.h> #define CAS_NCPUS num_online_cpus() #define cas_skb_release(x) netif_rx(x) /* select which firmware to use */ #define USE_HP_WORKAROUND #define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */ #define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */ #include "cassini.h" #define USE_TX_COMPWB /* use completion writeback registers */ #define USE_CSMA_CD_PROTO /* standard CSMA/CD */ #define USE_RX_BLANK /* hw interrupt mitigation */ #undef USE_ENTROPY_DEV /* don't test for entropy device */ /* NOTE: these aren't useable unless PCI interrupts can be assigned. * also, we need to make cp->lock finer-grained. */ #undef USE_PCI_INTB #undef USE_PCI_INTC #undef USE_PCI_INTD #undef USE_QOS #undef USE_VPD_DEBUG /* debug vpd information if defined */ /* rx processing options */ #define USE_PAGE_ORDER /* specify to allocate large rx pages */ #define RX_DONT_BATCH 0 /* if 1, don't batch flows */ #define RX_COPY_ALWAYS 0 /* if 0, use frags */ #define RX_COPY_MIN 64 /* copy a little to make upper layers happy */ #undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */ #define DRV_MODULE_NAME "cassini" #define DRV_MODULE_VERSION "1.6" #define DRV_MODULE_RELDATE "21 May 2008" #define CAS_DEF_MSG_ENABLE \ (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ NETIF_MSG_LINK | \ NETIF_MSG_TIMER | \ NETIF_MSG_IFDOWN | \ NETIF_MSG_IFUP | \ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) /* length of time before we decide the hardware is borked, * and dev->tx_timeout() should be called to fix the problem */ #define CAS_TX_TIMEOUT (HZ) #define CAS_LINK_TIMEOUT (22*HZ/10) #define CAS_LINK_FAST_TIMEOUT (1) /* timeout values for state changing. these specify the number * of 10us delays to be used before giving up. */ #define STOP_TRIES_PHY 1000 #define STOP_TRIES 5000 /* specify a minimum frame size to deal with some fifo issues * max mtu == 2 * page size - ethernet header - 64 - swivel = * 2 * page_size - 0x50 */ #define CAS_MIN_FRAME 97 #define CAS_1000MB_MIN_FRAME 255 #define CAS_MIN_MTU 60 #define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000) #if 1 /* * Eliminate these and use separate atomic counters for each, to * avoid a race condition. 
*/ #else #define CAS_RESET_MTU 1 #define CAS_RESET_ALL 2 #define CAS_RESET_SPARE 3 #endif static char version[] = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */ static int link_mode; MODULE_AUTHOR("Adrian Sun ([email protected])"); MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("sun/cassini.bin"); module_param(cassini_debug, int, 0); MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value"); module_param(link_mode, int, 0); MODULE_PARM_DESC(link_mode, "default link mode"); /* * Work around for a PCS bug in which the link goes down due to the chip * being confused and never showing a link status of "up." */ #define DEFAULT_LINKDOWN_TIMEOUT 5 /* * Value in seconds, for user input. */ static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT; module_param(linkdown_timeout, int, 0); MODULE_PARM_DESC(linkdown_timeout, "min reset interval in sec. for PCS linkdown issue; disabled if not positive"); /* * value in 'ticks' (units used by jiffies). Set when we init the * module because 'HZ' in actually a function call on some flavors of * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ. */ static int link_transition_timeout; static u16 link_modes[] = { BMCR_ANENABLE, /* 0 : autoneg */ 0, /* 1 : 10bt half duplex */ BMCR_SPEED100, /* 2 : 100bt half duplex */ BMCR_FULLDPLX, /* 3 : 10bt full duplex */ BMCR_SPEED100|BMCR_FULLDPLX, /* 4 : 100bt full duplex */ CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */ }; static const struct pci_device_id cas_pci_tbl[] = { { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { 0, } }; MODULE_DEVICE_TABLE(pci, cas_pci_tbl); static void cas_set_link_modes(struct cas *cp); static inline void cas_lock_tx(struct cas *cp) { int i; for (i = 0; i < N_TX_RINGS; i++) spin_lock_nested(&cp->tx_lock[i], i); } /* WTZ: QA was finding deadlock problems with the previous * versions after long test runs with multiple cards per machine. * See if replacing cas_lock_all with safer versions helps. The * symptoms QA is reporting match those we'd expect if interrupts * aren't being properly restored, and we fixed a previous deadlock * with similar symptoms by using save/restore versions in other * places. 
*/ #define cas_lock_all_save(cp, flags) \ do { \ struct cas *xxxcp = (cp); \ spin_lock_irqsave(&xxxcp->lock, flags); \ cas_lock_tx(xxxcp); \ } while (0) static inline void cas_unlock_tx(struct cas *cp) { int i; for (i = N_TX_RINGS; i > 0; i--) spin_unlock(&cp->tx_lock[i - 1]); } #define cas_unlock_all_restore(cp, flags) \ do { \ struct cas *xxxcp = (cp); \ cas_unlock_tx(xxxcp); \ spin_unlock_irqrestore(&xxxcp->lock, flags); \ } while (0) static void cas_disable_irq(struct cas *cp, const int ring) { /* Make sure we won't get any more interrupts */ if (ring == 0) { writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK); return; } /* disable completion interrupts and selectively mask */ if (cp->cas_flags & CAS_FLAG_REG_PLUS) { switch (ring) { #if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD) #ifdef USE_PCI_INTB case 1: #endif #ifdef USE_PCI_INTC case 2: #endif #ifdef USE_PCI_INTD case 3: #endif writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN, cp->regs + REG_PLUS_INTRN_MASK(ring)); break; #endif default: writel(INTRN_MASK_CLEAR_ALL, cp->regs + REG_PLUS_INTRN_MASK(ring)); break; } } } static inline void cas_mask_intr(struct cas *cp) { int i; for (i = 0; i < N_RX_COMP_RINGS; i++) cas_disable_irq(cp, i); } static void cas_enable_irq(struct cas *cp, const int ring) { if (ring == 0) { /* all but TX_DONE */ writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK); return; } if (cp->cas_flags & CAS_FLAG_REG_PLUS) { switch (ring) { #if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD) #ifdef USE_PCI_INTB case 1: #endif #ifdef USE_PCI_INTC case 2: #endif #ifdef USE_PCI_INTD case 3: #endif writel(INTRN_MASK_RX_EN, cp->regs + REG_PLUS_INTRN_MASK(ring)); break; #endif default: break; } } } static inline void cas_unmask_intr(struct cas *cp) { int i; for (i = 0; i < N_RX_COMP_RINGS; i++) cas_enable_irq(cp, i); } static inline void cas_entropy_gather(struct cas *cp) { #ifdef USE_ENTROPY_DEV if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) return; batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV), readl(cp->regs + REG_ENTROPY_IV), sizeof(uint64_t)*8); #endif } static inline void cas_entropy_reset(struct cas *cp) { #ifdef USE_ENTROPY_DEV if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) return; writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT, cp->regs + REG_BIM_LOCAL_DEV_EN); writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET); writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG); /* if we read back 0x0, we don't have an entropy device */ if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0) cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV; #endif } /* access to the phy. 
the following assumes that we've initialized the MIF to * be in frame rather than bit-bang mode */ static u16 cas_phy_read(struct cas *cp, int reg) { u32 cmd; int limit = STOP_TRIES_PHY; cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ; cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg); cmd |= MIF_FRAME_TURN_AROUND_MSB; writel(cmd, cp->regs + REG_MIF_FRAME); /* poll for completion */ while (limit-- > 0) { udelay(10); cmd = readl(cp->regs + REG_MIF_FRAME); if (cmd & MIF_FRAME_TURN_AROUND_LSB) return cmd & MIF_FRAME_DATA_MASK; } return 0xFFFF; /* -1 */ } static int cas_phy_write(struct cas *cp, int reg, u16 val) { int limit = STOP_TRIES_PHY; u32 cmd; cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE; cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg); cmd |= MIF_FRAME_TURN_AROUND_MSB; cmd |= val & MIF_FRAME_DATA_MASK; writel(cmd, cp->regs + REG_MIF_FRAME); /* poll for completion */ while (limit-- > 0) { udelay(10); cmd = readl(cp->regs + REG_MIF_FRAME); if (cmd & MIF_FRAME_TURN_AROUND_LSB) return 0; } return -1; } static void cas_phy_powerup(struct cas *cp) { u16 ctl = cas_phy_read(cp, MII_BMCR); if ((ctl & BMCR_PDOWN) == 0) return; ctl &= ~BMCR_PDOWN; cas_phy_write(cp, MII_BMCR, ctl); } static void cas_phy_powerdown(struct cas *cp) { u16 ctl = cas_phy_read(cp, MII_BMCR); if (ctl & BMCR_PDOWN) return; ctl |= BMCR_PDOWN; cas_phy_write(cp, MII_BMCR, ctl); } /* cp->lock held. note: the last put_page will free the buffer */ static int cas_page_free(struct cas *cp, cas_page_t *page) { dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size, DMA_FROM_DEVICE); __free_pages(page->buffer, cp->page_order); kfree(page); return 0; } #ifdef RX_COUNT_BUFFERS #define RX_USED_ADD(x, y) ((x)->used += (y)) #define RX_USED_SET(x, y) ((x)->used = (y)) #else #define RX_USED_ADD(x, y) do { } while(0) #define RX_USED_SET(x, y) do { } while(0) #endif /* local page allocation routines for the receive buffers. jumbo pages * require at least 8K contiguous and 8K aligned buffers. */ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags) { cas_page_t *page; page = kmalloc(sizeof(cas_page_t), flags); if (!page) return NULL; INIT_LIST_HEAD(&page->list); RX_USED_SET(page, 0); page->buffer = alloc_pages(flags, cp->page_order); if (!page->buffer) goto page_err; page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0, cp->page_size, DMA_FROM_DEVICE); return page; page_err: kfree(page); return NULL; } /* initialize spare pool of rx buffers, but allocate during the open */ static void cas_spare_init(struct cas *cp) { spin_lock(&cp->rx_inuse_lock); INIT_LIST_HEAD(&cp->rx_inuse_list); spin_unlock(&cp->rx_inuse_lock); spin_lock(&cp->rx_spare_lock); INIT_LIST_HEAD(&cp->rx_spare_list); cp->rx_spares_needed = RX_SPARE_COUNT; spin_unlock(&cp->rx_spare_lock); } /* used on close. free all the spare buffers. */ static void cas_spare_free(struct cas *cp) { struct list_head list, *elem, *tmp; /* free spare buffers */ INIT_LIST_HEAD(&list); spin_lock(&cp->rx_spare_lock); list_splice_init(&cp->rx_spare_list, &list); spin_unlock(&cp->rx_spare_lock); list_for_each_safe(elem, tmp, &list) { cas_page_free(cp, list_entry(elem, cas_page_t, list)); } INIT_LIST_HEAD(&list); #if 1 /* * Looks like Adrian had protected this with a different * lock than used everywhere else to manipulate this list. 
*/ spin_lock(&cp->rx_inuse_lock); list_splice_init(&cp->rx_inuse_list, &list); spin_unlock(&cp->rx_inuse_lock); #else spin_lock(&cp->rx_spare_lock); list_splice_init(&cp->rx_inuse_list, &list); spin_unlock(&cp->rx_spare_lock); #endif list_for_each_safe(elem, tmp, &list) { cas_page_free(cp, list_entry(elem, cas_page_t, list)); } } /* replenish spares if needed */ static void cas_spare_recover(struct cas *cp, const gfp_t flags) { struct list_head list, *elem, *tmp; int needed, i; /* check inuse list. if we don't need any more free buffers, * just free it */ /* make a local copy of the list */ INIT_LIST_HEAD(&list); spin_lock(&cp->rx_inuse_lock); list_splice_init(&cp->rx_inuse_list, &list); spin_unlock(&cp->rx_inuse_lock); list_for_each_safe(elem, tmp, &list) { cas_page_t *page = list_entry(elem, cas_page_t, list); /* * With the lockless pagecache, cassini buffering scheme gets * slightly less accurate: we might find that a page has an * elevated reference count here, due to a speculative ref, * and skip it as in-use. Ideally we would be able to reclaim * it. However this would be such a rare case, it doesn't * matter too much as we should pick it up the next time round. * * Importantly, if we find that the page has a refcount of 1 * here (our refcount), then we know it is definitely not inuse * so we can reuse it. */ if (page_count(page->buffer) > 1) continue; list_del(elem); spin_lock(&cp->rx_spare_lock); if (cp->rx_spares_needed > 0) { list_add(elem, &cp->rx_spare_list); cp->rx_spares_needed--; spin_unlock(&cp->rx_spare_lock); } else { spin_unlock(&cp->rx_spare_lock); cas_page_free(cp, page); } } /* put any inuse buffers back on the list */ if (!list_empty(&list)) { spin_lock(&cp->rx_inuse_lock); list_splice(&list, &cp->rx_inuse_list); spin_unlock(&cp->rx_inuse_lock); } spin_lock(&cp->rx_spare_lock); needed = cp->rx_spares_needed; spin_unlock(&cp->rx_spare_lock); if (!needed) return; /* we still need spares, so try to allocate some */ INIT_LIST_HEAD(&list); i = 0; while (i < needed) { cas_page_t *spare = cas_page_alloc(cp, flags); if (!spare) break; list_add(&spare->list, &list); i++; } spin_lock(&cp->rx_spare_lock); list_splice(&list, &cp->rx_spare_list); cp->rx_spares_needed -= i; spin_unlock(&cp->rx_spare_lock); } /* pull a page from the list. */ static cas_page_t *cas_page_dequeue(struct cas *cp) { struct list_head *entry; int recover; spin_lock(&cp->rx_spare_lock); if (list_empty(&cp->rx_spare_list)) { /* try to do a quick recovery */ spin_unlock(&cp->rx_spare_lock); cas_spare_recover(cp, GFP_ATOMIC); spin_lock(&cp->rx_spare_lock); if (list_empty(&cp->rx_spare_list)) { netif_err(cp, rx_err, cp->dev, "no spare buffers available\n"); spin_unlock(&cp->rx_spare_lock); return NULL; } } entry = cp->rx_spare_list.next; list_del(entry); recover = ++cp->rx_spares_needed; spin_unlock(&cp->rx_spare_lock); /* trigger the timer to do the recovery */ if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) { #if 1 atomic_inc(&cp->reset_task_pending); atomic_inc(&cp->reset_task_pending_spare); schedule_work(&cp->reset_task); #else atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE); schedule_work(&cp->reset_task); #endif } return list_entry(entry, cas_page_t, list); } static void cas_mif_poll(struct cas *cp, const int enable) { u32 cfg; cfg = readl(cp->regs + REG_MIF_CFG); cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1); if (cp->phy_type & CAS_PHY_MII_MDIO1) cfg |= MIF_CFG_PHY_SELECT; /* poll and interrupt on link status change. 
*/ if (enable) { cfg |= MIF_CFG_POLL_EN; cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR); cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr); } writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF, cp->regs + REG_MIF_MASK); writel(cfg, cp->regs + REG_MIF_CFG); } /* Must be invoked under cp->lock */ static void cas_begin_auto_negotiation(struct cas *cp, const struct ethtool_link_ksettings *ep) { u16 ctl; #if 1 int lcntl; int changed = 0; int oldstate = cp->lstate; int link_was_not_down = !(oldstate == link_down); #endif /* Setup link parameters */ if (!ep) goto start_aneg; lcntl = cp->link_cntl; if (ep->base.autoneg == AUTONEG_ENABLE) { cp->link_cntl = BMCR_ANENABLE; } else { u32 speed = ep->base.speed; cp->link_cntl = 0; if (speed == SPEED_100) cp->link_cntl |= BMCR_SPEED100; else if (speed == SPEED_1000) cp->link_cntl |= CAS_BMCR_SPEED1000; if (ep->base.duplex == DUPLEX_FULL) cp->link_cntl |= BMCR_FULLDPLX; } #if 1 changed = (lcntl != cp->link_cntl); #endif start_aneg: if (cp->lstate == link_up) { netdev_info(cp->dev, "PCS link down\n"); } else { if (changed) { netdev_info(cp->dev, "link configuration changed\n"); } } cp->lstate = link_down; cp->link_transition = LINK_TRANSITION_LINK_DOWN; if (!cp->hw_running) return; #if 1 /* * WTZ: If the old state was link_up, we turn off the carrier * to replicate everything we do elsewhere on a link-down * event when we were already in a link-up state.. */ if (oldstate == link_up) netif_carrier_off(cp->dev); if (changed && link_was_not_down) { /* * WTZ: This branch will simply schedule a full reset after * we explicitly changed link modes in an ioctl. See if this * fixes the link-problems we were having for forced mode. */ atomic_inc(&cp->reset_task_pending); atomic_inc(&cp->reset_task_pending_all); schedule_work(&cp->reset_task); cp->timer_ticks = 0; mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); return; } #endif if (cp->phy_type & CAS_PHY_SERDES) { u32 val = readl(cp->regs + REG_PCS_MII_CTRL); if (cp->link_cntl & BMCR_ANENABLE) { val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN); cp->lstate = link_aneg; } else { if (cp->link_cntl & BMCR_FULLDPLX) val |= PCS_MII_CTRL_DUPLEX; val &= ~PCS_MII_AUTONEG_EN; cp->lstate = link_force_ok; } cp->link_transition = LINK_TRANSITION_LINK_CONFIG; writel(val, cp->regs + REG_PCS_MII_CTRL); } else { cas_mif_poll(cp, 0); ctl = cas_phy_read(cp, MII_BMCR); ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | CAS_BMCR_SPEED1000 | BMCR_ANENABLE); ctl |= cp->link_cntl; if (ctl & BMCR_ANENABLE) { ctl |= BMCR_ANRESTART; cp->lstate = link_aneg; } else { cp->lstate = link_force_ok; } cp->link_transition = LINK_TRANSITION_LINK_CONFIG; cas_phy_write(cp, MII_BMCR, ctl); cas_mif_poll(cp, 1); } cp->timer_ticks = 0; mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); } /* Must be invoked under cp->lock. 
*/ static int cas_reset_mii_phy(struct cas *cp) { int limit = STOP_TRIES_PHY; u16 val; cas_phy_write(cp, MII_BMCR, BMCR_RESET); udelay(100); while (--limit) { val = cas_phy_read(cp, MII_BMCR); if ((val & BMCR_RESET) == 0) break; udelay(10); } return limit <= 0; } static void cas_saturn_firmware_init(struct cas *cp) { const struct firmware *fw; const char fw_name[] = "sun/cassini.bin"; int err; if (PHY_NS_DP83065 != cp->phy_id) return; err = request_firmware(&fw, fw_name, &cp->pdev->dev); if (err) { pr_err("Failed to load firmware \"%s\"\n", fw_name); return; } if (fw->size < 2) { pr_err("bogus length %zu in \"%s\"\n", fw->size, fw_name); goto out; } cp->fw_load_addr= fw->data[1] << 8 | fw->data[0]; cp->fw_size = fw->size - 2; cp->fw_data = vmalloc(cp->fw_size); if (!cp->fw_data) goto out; memcpy(cp->fw_data, &fw->data[2], cp->fw_size); out: release_firmware(fw); } static void cas_saturn_firmware_load(struct cas *cp) { int i; if (!cp->fw_data) return; cas_phy_powerdown(cp); /* expanded memory access mode */ cas_phy_write(cp, DP83065_MII_MEM, 0x0); /* pointer configuration for new firmware */ cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9); cas_phy_write(cp, DP83065_MII_REGD, 0xbd); cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa); cas_phy_write(cp, DP83065_MII_REGD, 0x82); cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb); cas_phy_write(cp, DP83065_MII_REGD, 0x0); cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc); cas_phy_write(cp, DP83065_MII_REGD, 0x39); /* download new firmware */ cas_phy_write(cp, DP83065_MII_MEM, 0x1); cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr); for (i = 0; i < cp->fw_size; i++) cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]); /* enable firmware */ cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8); cas_phy_write(cp, DP83065_MII_REGD, 0x1); } /* phy initialization */ static void cas_phy_init(struct cas *cp) { u16 val; /* if we're in MII/GMII mode, set up phy */ if (CAS_PHY_MII(cp->phy_type)) { writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); cas_mif_poll(cp, 0); cas_reset_mii_phy(cp); /* take out of isolate mode */ if (PHY_LUCENT_B0 == cp->phy_id) { /* workaround link up/down issue with lucent */ cas_phy_write(cp, LUCENT_MII_REG, 0x8000); cas_phy_write(cp, MII_BMCR, 0x00f1); cas_phy_write(cp, LUCENT_MII_REG, 0x0); } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) { /* workarounds for broadcom phy */ cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20); cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012); cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804); cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013); cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204); cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132); cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232); cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F); cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20); } else if (PHY_BROADCOM_5411 == cp->phy_id) { val = cas_phy_read(cp, BROADCOM_MII_REG4); val = cas_phy_read(cp, BROADCOM_MII_REG4); if (val & 0x0080) { /* link workaround */ cas_phy_write(cp, BROADCOM_MII_REG4, val & ~0x0080); } } else if (cp->cas_flags & CAS_FLAG_SATURN) { writel((cp->phy_type & CAS_PHY_MII_MDIO0) ? SATURN_PCFG_FSI : 0x0, cp->regs + REG_SATURN_PCFG); /* load firmware to address 10Mbps auto-negotiation * issue. NOTE: this will need to be changed if the * default firmware gets fixed. 
*/ if (PHY_NS_DP83065 == cp->phy_id) { cas_saturn_firmware_load(cp); } cas_phy_powerup(cp); } /* advertise capabilities */ val = cas_phy_read(cp, MII_BMCR); val &= ~BMCR_ANENABLE; cas_phy_write(cp, MII_BMCR, val); udelay(10); cas_phy_write(cp, MII_ADVERTISE, cas_phy_read(cp, MII_ADVERTISE) | (ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_100FULL | CAS_ADVERTISE_PAUSE | CAS_ADVERTISE_ASYM_PAUSE)); if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { /* make sure that we don't advertise half * duplex to avoid a chip issue */ val = cas_phy_read(cp, CAS_MII_1000_CTRL); val &= ~CAS_ADVERTISE_1000HALF; val |= CAS_ADVERTISE_1000FULL; cas_phy_write(cp, CAS_MII_1000_CTRL, val); } } else { /* reset pcs for serdes */ u32 val; int limit; writel(PCS_DATAPATH_MODE_SERDES, cp->regs + REG_PCS_DATAPATH_MODE); /* enable serdes pins on saturn */ if (cp->cas_flags & CAS_FLAG_SATURN) writel(0, cp->regs + REG_SATURN_PCFG); /* Reset PCS unit. */ val = readl(cp->regs + REG_PCS_MII_CTRL); val |= PCS_MII_RESET; writel(val, cp->regs + REG_PCS_MII_CTRL); limit = STOP_TRIES; while (--limit > 0) { udelay(10); if ((readl(cp->regs + REG_PCS_MII_CTRL) & PCS_MII_RESET) == 0) break; } if (limit <= 0) netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n", readl(cp->regs + REG_PCS_STATE_MACHINE)); /* Make sure PCS is disabled while changing advertisement * configuration. */ writel(0x0, cp->regs + REG_PCS_CFG); /* Advertise all capabilities except half-duplex. */ val = readl(cp->regs + REG_PCS_MII_ADVERT); val &= ~PCS_MII_ADVERT_HD; val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE | PCS_MII_ADVERT_ASYM_PAUSE); writel(val, cp->regs + REG_PCS_MII_ADVERT); /* enable PCS */ writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG); /* pcs workaround: enable sync detect */ writel(PCS_SERDES_CTRL_SYNCD_EN, cp->regs + REG_PCS_SERDES_CTRL); } } static int cas_pcs_link_check(struct cas *cp) { u32 stat, state_machine; int retval = 0; /* The link status bit latches on zero, so you must * read it twice in such a case to see a transition * to the link being up. */ stat = readl(cp->regs + REG_PCS_MII_STATUS); if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0) stat = readl(cp->regs + REG_PCS_MII_STATUS); /* The remote-fault indication is only valid * when autoneg has completed. */ if ((stat & (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) == (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) netif_info(cp, link, cp->dev, "PCS RemoteFault\n"); /* work around link detection issue by querying the PCS state * machine directly. */ state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE); if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) { stat &= ~PCS_MII_STATUS_LINK_STATUS; } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) { stat |= PCS_MII_STATUS_LINK_STATUS; } if (stat & PCS_MII_STATUS_LINK_STATUS) { if (cp->lstate != link_up) { if (cp->opened) { cp->lstate = link_up; cp->link_transition = LINK_TRANSITION_LINK_UP; cas_set_link_modes(cp); netif_carrier_on(cp->dev); } } } else if (cp->lstate == link_up) { cp->lstate = link_down; if (link_transition_timeout != 0 && cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && !cp->link_transition_jiffies_valid) { /* * force a reset, as a workaround for the * link-failure problem. May want to move this to a * point a bit earlier in the sequence. If we had * generated a reset a short time ago, we'll wait for * the link timer to check the status until a * timer expires (link_transistion_jiffies_valid is * true when the timer is running.) 
Instead of using * a system timer, we just do a check whenever the * link timer is running - this clears the flag after * a suitable delay. */ retval = 1; cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; cp->link_transition_jiffies = jiffies; cp->link_transition_jiffies_valid = 1; } else { cp->link_transition = LINK_TRANSITION_ON_FAILURE; } netif_carrier_off(cp->dev); if (cp->opened) netif_info(cp, link, cp->dev, "PCS link down\n"); /* Cassini only: if you force a mode, there can be * sync problems on link down. to fix that, the following * things need to be checked: * 1) read serialink state register * 2) read pcs status register to verify link down. * 3) if link down and serial link == 0x03, then you need * to global reset the chip. */ if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) { /* should check to see if we're in a forced mode */ stat = readl(cp->regs + REG_PCS_SERDES_STATE); if (stat == 0x03) return 1; } } else if (cp->lstate == link_down) { if (link_transition_timeout != 0 && cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && !cp->link_transition_jiffies_valid) { /* force a reset, as a workaround for the * link-failure problem. May want to move * this to a point a bit earlier in the * sequence. */ retval = 1; cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; cp->link_transition_jiffies = jiffies; cp->link_transition_jiffies_valid = 1; } else { cp->link_transition = LINK_TRANSITION_STILL_FAILED; } } return retval; } static int cas_pcs_interrupt(struct net_device *dev, struct cas *cp, u32 status) { u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS); if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0) return 0; return cas_pcs_link_check(cp); } static int cas_txmac_interrupt(struct net_device *dev, struct cas *cp, u32 status) { u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS); if (!txmac_stat) return 0; netif_printk(cp, intr, KERN_DEBUG, cp->dev, "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat); /* Defer timer expiration is quite normal, * don't even log the event. */ if ((txmac_stat & MAC_TX_DEFER_TIMER) && !(txmac_stat & ~MAC_TX_DEFER_TIMER)) return 0; spin_lock(&cp->stat_lock[0]); if (txmac_stat & MAC_TX_UNDERRUN) { netdev_err(dev, "TX MAC xmit underrun\n"); cp->net_stats[0].tx_fifo_errors++; } if (txmac_stat & MAC_TX_MAX_PACKET_ERR) { netdev_err(dev, "TX MAC max packet size error\n"); cp->net_stats[0].tx_errors++; } /* The rest are all cases of one of the 16-bit TX * counters expiring. */ if (txmac_stat & MAC_TX_COLL_NORMAL) cp->net_stats[0].collisions += 0x10000; if (txmac_stat & MAC_TX_COLL_EXCESS) { cp->net_stats[0].tx_aborted_errors += 0x10000; cp->net_stats[0].collisions += 0x10000; } if (txmac_stat & MAC_TX_COLL_LATE) { cp->net_stats[0].tx_aborted_errors += 0x10000; cp->net_stats[0].collisions += 0x10000; } spin_unlock(&cp->stat_lock[0]); /* We do not keep track of MAC_TX_COLL_FIRST and * MAC_TX_PEAK_ATTEMPTS events. 
*/ return 0; } static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware) { cas_hp_inst_t *inst; u32 val; int i; i = 0; while ((inst = firmware) && inst->note) { writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR); val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val); val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask); writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI); val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10); val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop); val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext); val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff); val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext); val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff); val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op); writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID); val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask); val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift); val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab); val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg); writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW); ++firmware; ++i; } } static void cas_init_rx_dma(struct cas *cp) { u64 desc_dma = cp->block_dvma; u32 val; int i, size; /* rx free descriptors */ val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL); val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0)); val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0)); if ((N_RX_DESC_RINGS > 1) && (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */ val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1)); writel(val, cp->regs + REG_RX_CFG); val = (unsigned long) cp->init_rxds[0] - (unsigned long) cp->init_block; writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI); writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW); writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); if (cp->cas_flags & CAS_FLAG_REG_PLUS) { /* rx desc 2 is for IPSEC packets. however, * we don't it that for that purpose. */ val = (unsigned long) cp->init_rxds[1] - (unsigned long) cp->init_block; writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI); writel((desc_dma + val) & 0xffffffff, cp->regs + REG_PLUS_RX_DB1_LOW); writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs + REG_PLUS_RX_KICK1); } /* rx completion registers */ val = (unsigned long) cp->init_rxcs[0] - (unsigned long) cp->init_block; writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI); writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW); if (cp->cas_flags & CAS_FLAG_REG_PLUS) { /* rx comp 2-4 */ for (i = 1; i < MAX_RX_COMP_RINGS; i++) { val = (unsigned long) cp->init_rxcs[i] - (unsigned long) cp->init_block; writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_CBN_HI(i)); writel((desc_dma + val) & 0xffffffff, cp->regs + REG_PLUS_RX_CBN_LOW(i)); } } /* read selective clear regs to prevent spurious interrupts * on reset because complete == kick. 
* selective clear set up to prevent interrupts on resets */ readl(cp->regs + REG_INTR_STATUS_ALIAS); writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR); /* set up pause thresholds */ val = CAS_BASE(RX_PAUSE_THRESH_OFF, cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM); val |= CAS_BASE(RX_PAUSE_THRESH_ON, cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM); writel(val, cp->regs + REG_RX_PAUSE_THRESH); /* zero out dma reassembly buffers */ for (i = 0; i < 64; i++) { writel(i, cp->regs + REG_RX_TABLE_ADDR); writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW); writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID); writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI); } /* make sure address register is 0 for normal operation */ writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR); writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR); /* interrupt mitigation */ #ifdef USE_RX_BLANK val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL); val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL); writel(val, cp->regs + REG_RX_BLANK); #else writel(0x0, cp->regs + REG_RX_BLANK); #endif /* interrupt generation as a function of low water marks for * free desc and completion entries. these are used to trigger * housekeeping for rx descs. we don't use the free interrupt * as it's not very useful */ /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */ val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL); writel(val, cp->regs + REG_RX_AE_THRESH); if (cp->cas_flags & CAS_FLAG_REG_PLUS) { val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1)); writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH); } /* Random early detect registers. useful for congestion avoidance. * this should be tunable. */ writel(0x0, cp->regs + REG_RX_RED); /* receive page sizes. default == 2K (0x800) */ val = 0; if (cp->page_size == 0x1000) val = 0x1; else if (cp->page_size == 0x2000) val = 0x2; else if (cp->page_size == 0x4000) val = 0x3; /* round mtu + offset. constrain to page size. */ size = cp->dev->mtu + 64; if (size > cp->page_size) size = cp->page_size; if (size <= 0x400) i = 0x0; else if (size <= 0x800) i = 0x1; else if (size <= 0x1000) i = 0x2; else i = 0x3; cp->mtu_stride = 1 << (i + 10); val = CAS_BASE(RX_PAGE_SIZE, val); val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i); val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10)); val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1); writel(val, cp->regs + REG_RX_PAGE_SIZE); /* enable the header parser if desired */ if (&CAS_HP_FIRMWARE[0] == &cas_prog_null[0]) return; val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS); val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK; val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL); writel(val, cp->regs + REG_HP_CFG); } static inline void cas_rxc_init(struct cas_rx_comp *rxc) { memset(rxc, 0, sizeof(*rxc)); rxc->word4 = cpu_to_le64(RX_COMP4_ZERO); } /* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1] * flipping is protected by the fact that the chip will not * hand back the same page index while it's being processed. 
*/ static inline cas_page_t *cas_page_spare(struct cas *cp, const int index) { cas_page_t *page = cp->rx_pages[1][index]; cas_page_t *new; if (page_count(page->buffer) == 1) return page; new = cas_page_dequeue(cp); if (new) { spin_lock(&cp->rx_inuse_lock); list_add(&page->list, &cp->rx_inuse_list); spin_unlock(&cp->rx_inuse_lock); } return new; } /* this needs to be changed if we actually use the ENC RX DESC ring */ static cas_page_t *cas_page_swap(struct cas *cp, const int ring, const int index) { cas_page_t **page0 = cp->rx_pages[0]; cas_page_t **page1 = cp->rx_pages[1]; /* swap if buffer is in use */ if (page_count(page0[index]->buffer) > 1) { cas_page_t *new = cas_page_spare(cp, index); if (new) { page1[index] = page0[index]; page0[index] = new; } } RX_USED_SET(page0[index], 0); return page0[index]; } static void cas_clean_rxds(struct cas *cp) { /* only clean ring 0 as ring 1 is used for spare buffers */ struct cas_rx_desc *rxd = cp->init_rxds[0]; int i, size; /* release all rx flows */ for (i = 0; i < N_RX_FLOWS; i++) { struct sk_buff *skb; while ((skb = __skb_dequeue(&cp->rx_flows[i]))) { cas_skb_release(skb); } } /* initialize descriptors */ size = RX_DESC_RINGN_SIZE(0); for (i = 0; i < size; i++) { cas_page_t *page = cas_page_swap(cp, 0, i); rxd[i].buffer = cpu_to_le64(page->dma_addr); rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) | CAS_BASE(RX_INDEX_RING, 0)); } cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4; cp->rx_last[0] = 0; cp->cas_flags &= ~CAS_FLAG_RXD_POST(0); } static void cas_clean_rxcs(struct cas *cp) { int i, j; /* take ownership of rx comp descriptors */ memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS); memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS); for (i = 0; i < N_RX_COMP_RINGS; i++) { struct cas_rx_comp *rxc = cp->init_rxcs[i]; for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) { cas_rxc_init(rxc + j); } } } #if 0 /* When we get a RX fifo overflow, the RX unit is probably hung * so we do the following. * * If any part of the reset goes wrong, we return 1 and that causes the * whole chip to be reset. */ static int cas_rxmac_reset(struct cas *cp) { struct net_device *dev = cp->dev; int limit; u32 val; /* First, reset MAC RX. */ writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); for (limit = 0; limit < STOP_TRIES; limit++) { if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN)) break; udelay(10); } if (limit == STOP_TRIES) { netdev_err(dev, "RX MAC will not disable, resetting whole chip\n"); return 1; } /* Second, disable RX DMA. */ writel(0, cp->regs + REG_RX_CFG); for (limit = 0; limit < STOP_TRIES; limit++) { if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN)) break; udelay(10); } if (limit == STOP_TRIES) { netdev_err(dev, "RX DMA will not disable, resetting whole chip\n"); return 1; } mdelay(5); /* Execute RX reset command. */ writel(SW_RESET_RX, cp->regs + REG_SW_RESET); for (limit = 0; limit < STOP_TRIES; limit++) { if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX)) break; udelay(10); } if (limit == STOP_TRIES) { netdev_err(dev, "RX reset command will not execute, resetting whole chip\n"); return 1; } /* reset driver rx state */ cas_clean_rxds(cp); cas_clean_rxcs(cp); /* Now, reprogram the rest of RX unit. 
*/ cas_init_rx_dma(cp); /* re-enable */ val = readl(cp->regs + REG_RX_CFG); writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG); writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); val = readl(cp->regs + REG_MAC_RX_CFG); writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); return 0; } #endif static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp, u32 status) { u32 stat = readl(cp->regs + REG_MAC_RX_STATUS); if (!stat) return 0; netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat); /* these are all rollovers */ spin_lock(&cp->stat_lock[0]); if (stat & MAC_RX_ALIGN_ERR) cp->net_stats[0].rx_frame_errors += 0x10000; if (stat & MAC_RX_CRC_ERR) cp->net_stats[0].rx_crc_errors += 0x10000; if (stat & MAC_RX_LEN_ERR) cp->net_stats[0].rx_length_errors += 0x10000; if (stat & MAC_RX_OVERFLOW) { cp->net_stats[0].rx_over_errors++; cp->net_stats[0].rx_fifo_errors++; } /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR * events. */ spin_unlock(&cp->stat_lock[0]); return 0; } static int cas_mac_interrupt(struct net_device *dev, struct cas *cp, u32 status) { u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS); if (!stat) return 0; netif_printk(cp, intr, KERN_DEBUG, cp->dev, "mac interrupt, stat: 0x%x\n", stat); /* This interrupt is just for pause frame and pause * tracking. It is useful for diagnostics and debug * but probably by default we will mask these events. */ if (stat & MAC_CTRL_PAUSE_STATE) cp->pause_entered++; if (stat & MAC_CTRL_PAUSE_RECEIVED) cp->pause_last_time_recvd = (stat >> 16); return 0; } /* Must be invoked under cp->lock. */ static inline int cas_mdio_link_not_up(struct cas *cp) { u16 val; switch (cp->lstate) { case link_force_ret: netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n"); cas_phy_write(cp, MII_BMCR, cp->link_fcntl); cp->timer_ticks = 5; cp->lstate = link_force_ok; cp->link_transition = LINK_TRANSITION_LINK_CONFIG; break; case link_aneg: val = cas_phy_read(cp, MII_BMCR); /* Try forced modes. we try things in the following order: * 1000 full -> 100 full/half -> 10 half */ val &= ~(BMCR_ANRESTART | BMCR_ANENABLE); val |= BMCR_FULLDPLX; val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? CAS_BMCR_SPEED1000 : BMCR_SPEED100; cas_phy_write(cp, MII_BMCR, val); cp->timer_ticks = 5; cp->lstate = link_force_try; cp->link_transition = LINK_TRANSITION_LINK_CONFIG; break; case link_force_try: /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */ val = cas_phy_read(cp, MII_BMCR); cp->timer_ticks = 5; if (val & CAS_BMCR_SPEED1000) { /* gigabit */ val &= ~CAS_BMCR_SPEED1000; val |= (BMCR_SPEED100 | BMCR_FULLDPLX); cas_phy_write(cp, MII_BMCR, val); break; } if (val & BMCR_SPEED100) { if (val & BMCR_FULLDPLX) /* fd failed */ val &= ~BMCR_FULLDPLX; else { /* 100Mbps failed */ val &= ~BMCR_SPEED100; } cas_phy_write(cp, MII_BMCR, val); break; } break; default: break; } return 0; } /* must be invoked with cp->lock held */ static int cas_mii_link_check(struct cas *cp, const u16 bmsr) { int restart; if (bmsr & BMSR_LSTATUS) { /* Ok, here we got a link. If we had it due to a forced * fallback, and we were configured for autoneg, we * retry a short autoneg pass. 
If you know your hub is * broken, use ethtool ;) */ if ((cp->lstate == link_force_try) && (cp->link_cntl & BMCR_ANENABLE)) { cp->lstate = link_force_ret; cp->link_transition = LINK_TRANSITION_LINK_CONFIG; cas_mif_poll(cp, 0); cp->link_fcntl = cas_phy_read(cp, MII_BMCR); cp->timer_ticks = 5; if (cp->opened) netif_info(cp, link, cp->dev, "Got link after fallback, retrying autoneg once...\n"); cas_phy_write(cp, MII_BMCR, cp->link_fcntl | BMCR_ANENABLE | BMCR_ANRESTART); cas_mif_poll(cp, 1); } else if (cp->lstate != link_up) { cp->lstate = link_up; cp->link_transition = LINK_TRANSITION_LINK_UP; if (cp->opened) { cas_set_link_modes(cp); netif_carrier_on(cp->dev); } } return 0; } /* link not up. if the link was previously up, we restart the * whole process */ restart = 0; if (cp->lstate == link_up) { cp->lstate = link_down; cp->link_transition = LINK_TRANSITION_LINK_DOWN; netif_carrier_off(cp->dev); if (cp->opened) netif_info(cp, link, cp->dev, "Link down\n"); restart = 1; } else if (++cp->timer_ticks > 10) cas_mdio_link_not_up(cp); return restart; } static int cas_mif_interrupt(struct net_device *dev, struct cas *cp, u32 status) { u32 stat = readl(cp->regs + REG_MIF_STATUS); u16 bmsr; /* check for a link change */ if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0) return 0; bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat); return cas_mii_link_check(cp, bmsr); } static int cas_pci_interrupt(struct net_device *dev, struct cas *cp, u32 status) { u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS); if (!stat) return 0; netdev_err(dev, "PCI error [%04x:%04x]", stat, readl(cp->regs + REG_BIM_DIAG)); /* cassini+ has this reserved */ if ((stat & PCI_ERR_BADACK) && ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0)) pr_cont(" <No ACK64# during ABS64 cycle>"); if (stat & PCI_ERR_DTRTO) pr_cont(" <Delayed transaction timeout>"); if (stat & PCI_ERR_OTHER) pr_cont(" <other>"); if (stat & PCI_ERR_BIM_DMA_WRITE) pr_cont(" <BIM DMA 0 write req>"); if (stat & PCI_ERR_BIM_DMA_READ) pr_cont(" <BIM DMA 0 read req>"); pr_cont("\n"); if (stat & PCI_ERR_OTHER) { int pci_errs; /* Interrogate PCI config space for the * true cause. */ pci_errs = pci_status_get_and_clear_errors(cp->pdev); netdev_err(dev, "PCI status errors[%04x]\n", pci_errs); if (pci_errs & PCI_STATUS_PARITY) netdev_err(dev, "PCI parity error detected\n"); if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT) netdev_err(dev, "PCI target abort\n"); if (pci_errs & PCI_STATUS_REC_TARGET_ABORT) netdev_err(dev, "PCI master acks target abort\n"); if (pci_errs & PCI_STATUS_REC_MASTER_ABORT) netdev_err(dev, "PCI master abort\n"); if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR) netdev_err(dev, "PCI system error SERR#\n"); if (pci_errs & PCI_STATUS_DETECTED_PARITY) netdev_err(dev, "PCI parity error\n"); } /* For all PCI errors, we should reset the chip. */ return 1; } /* All non-normal interrupt conditions get serviced here. * Returns non-zero if we should just exit the interrupt * handler right now (ie. if we reset the card which invalidates * all of the other original irq status bits). */ static int cas_abnormal_irq(struct net_device *dev, struct cas *cp, u32 status) { if (status & INTR_RX_TAG_ERROR) { /* corrupt RX tag framing */ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, "corrupt rx tag framing\n"); spin_lock(&cp->stat_lock[0]); cp->net_stats[0].rx_errors++; spin_unlock(&cp->stat_lock[0]); goto do_reset; } if (status & INTR_RX_LEN_MISMATCH) { /* length mismatch. 
*/ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, "length mismatch for rx frame\n"); spin_lock(&cp->stat_lock[0]); cp->net_stats[0].rx_errors++; spin_unlock(&cp->stat_lock[0]); goto do_reset; } if (status & INTR_PCS_STATUS) { if (cas_pcs_interrupt(dev, cp, status)) goto do_reset; } if (status & INTR_TX_MAC_STATUS) { if (cas_txmac_interrupt(dev, cp, status)) goto do_reset; } if (status & INTR_RX_MAC_STATUS) { if (cas_rxmac_interrupt(dev, cp, status)) goto do_reset; } if (status & INTR_MAC_CTRL_STATUS) { if (cas_mac_interrupt(dev, cp, status)) goto do_reset; } if (status & INTR_MIF_STATUS) { if (cas_mif_interrupt(dev, cp, status)) goto do_reset; } if (status & INTR_PCI_ERROR_STATUS) { if (cas_pci_interrupt(dev, cp, status)) goto do_reset; } return 0; do_reset: #if 1 atomic_inc(&cp->reset_task_pending); atomic_inc(&cp->reset_task_pending_all); netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status); schedule_work(&cp->reset_task); #else atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); netdev_err(dev, "reset called in cas_abnormal_irq\n"); schedule_work(&cp->reset_task); #endif return 1; } /* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when * determining whether to do a netif_stop/wakeup */ #define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1) #define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK) static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr, const int len) { unsigned long off = addr + len; if (CAS_TABORT(cp) == 1) return 0; if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN) return 0; return TX_TARGET_ABORT_LEN; } static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) { struct cas_tx_desc *txds; struct sk_buff **skbs; struct net_device *dev = cp->dev; int entry, count; spin_lock(&cp->tx_lock[ring]); txds = cp->init_txds[ring]; skbs = cp->tx_skbs[ring]; entry = cp->tx_old[ring]; count = TX_BUFF_COUNT(ring, entry, limit); while (entry != limit) { struct sk_buff *skb = skbs[entry]; dma_addr_t daddr; u32 dlen; int frag; if (!skb) { /* this should never occur */ entry = TX_DESC_NEXT(ring, entry); continue; } /* however, we might get only a partial skb release. */ count -= skb_shinfo(skb)->nr_frags + + cp->tx_tiny_use[ring][entry].nbufs + 1; if (count < 0) break; netif_printk(cp, tx_done, KERN_DEBUG, cp->dev, "tx[%d] done, slot %d\n", ring, entry); skbs[entry] = NULL; cp->tx_tiny_use[ring][entry].nbufs = 0; for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { struct cas_tx_desc *txd = txds + entry; daddr = le64_to_cpu(txd->buffer); dlen = CAS_VAL(TX_DESC_BUFLEN, le64_to_cpu(txd->control)); dma_unmap_page(&cp->pdev->dev, daddr, dlen, DMA_TO_DEVICE); entry = TX_DESC_NEXT(ring, entry); /* tiny buffer may follow */ if (cp->tx_tiny_use[ring][entry].used) { cp->tx_tiny_use[ring][entry].used = 0; entry = TX_DESC_NEXT(ring, entry); } } spin_lock(&cp->stat_lock[ring]); cp->net_stats[ring].tx_packets++; cp->net_stats[ring].tx_bytes += skb->len; spin_unlock(&cp->stat_lock[ring]); dev_consume_skb_irq(skb); } cp->tx_old[ring] = entry; /* this is wrong for multiple tx rings. the net device needs * multiple queues for this to do the right thing. 
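until then the single queue is stopped and woken based on whichever ring happens to be reclaimed here.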
we wait * for 2*packets to be available when using tiny buffers */ if (netif_queue_stopped(dev) && (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))) netif_wake_queue(dev); spin_unlock(&cp->tx_lock[ring]); } static void cas_tx(struct net_device *dev, struct cas *cp, u32 status) { int limit, ring; #ifdef USE_TX_COMPWB u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); #endif netif_printk(cp, intr, KERN_DEBUG, cp->dev, "tx interrupt, status: 0x%x, %llx\n", status, (unsigned long long)compwb); /* process all the rings */ for (ring = 0; ring < N_TX_RINGS; ring++) { #ifdef USE_TX_COMPWB /* use the completion writeback registers */ limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) | CAS_VAL(TX_COMPWB_LSB, compwb); compwb = TX_COMPWB_NEXT(compwb); #else limit = readl(cp->regs + REG_TX_COMPN(ring)); #endif if (cp->tx_old[ring] != limit) cas_tx_ringN(cp, ring, limit); } } static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, int entry, const u64 *words, struct sk_buff **skbref) { int dlen, hlen, len, i, alloclen; int off, swivel = RX_SWIVEL_OFF_VAL; struct cas_page *page; struct sk_buff *skb; void *crcaddr; __sum16 csum; char *p; hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]); dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]); len = hlen + dlen; if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT)) alloclen = len; else alloclen = max(hlen, RX_COPY_MIN); skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size); if (skb == NULL) return -1; *skbref = skb; skb_reserve(skb, swivel); p = skb->data; crcaddr = NULL; if (hlen) { /* always copy header pages */ i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 + swivel; i = hlen; if (!dlen) /* attach FCS */ i += cp->crc_size; dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off, i, DMA_FROM_DEVICE); memcpy(p, page_address(page->buffer) + off, i); dma_sync_single_for_device(&cp->pdev->dev, page->dma_addr + off, i, DMA_FROM_DEVICE); RX_USED_ADD(page, 0x100); p += hlen; swivel = 0; } if (alloclen < (hlen + dlen)) { skb_frag_t *frag = skb_shinfo(skb)->frags; /* normal or jumbo packets. we use frags */ i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; hlen = min(cp->page_size - off, dlen); if (hlen < 0) { netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, "rx page overflow: %d\n", hlen); dev_kfree_skb_irq(skb); return -1; } i = hlen; if (i == dlen) /* attach FCS */ i += cp->crc_size; dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off, i, DMA_FROM_DEVICE); /* make sure we always copy a header */ swivel = 0; if (p == (char *) skb->data) { /* not split */ memcpy(p, page_address(page->buffer) + off, RX_COPY_MIN); dma_sync_single_for_device(&cp->pdev->dev, page->dma_addr + off, i, DMA_FROM_DEVICE); off += RX_COPY_MIN; swivel = RX_COPY_MIN; RX_USED_ADD(page, cp->mtu_stride); } else { RX_USED_ADD(page, hlen); } skb_put(skb, alloclen); skb_shinfo(skb)->nr_frags++; skb->data_len += hlen - swivel; skb->truesize += hlen - swivel; skb->len += hlen - swivel; skb_frag_fill_page_desc(frag, page->buffer, off, hlen - swivel); __skb_frag_ref(frag); /* any more data? 
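A set RX_COMP1_SPLIT_PKT bit means the remainder of the frame landed in a second page, which is attached below as an additional frag.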
*/ if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { hlen = dlen; off = 0; i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr, hlen + cp->crc_size, DMA_FROM_DEVICE); dma_sync_single_for_device(&cp->pdev->dev, page->dma_addr, hlen + cp->crc_size, DMA_FROM_DEVICE); skb_shinfo(skb)->nr_frags++; skb->data_len += hlen; skb->len += hlen; frag++; skb_frag_fill_page_desc(frag, page->buffer, 0, hlen); __skb_frag_ref(frag); RX_USED_ADD(page, hlen + cp->crc_size); } if (cp->crc_size) crcaddr = page_address(page->buffer) + off + hlen; } else { /* copying packet */ if (!dlen) goto end_copy_pkt; i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; hlen = min(cp->page_size - off, dlen); if (hlen < 0) { netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, "rx page overflow: %d\n", hlen); dev_kfree_skb_irq(skb); return -1; } i = hlen; if (i == dlen) /* attach FCS */ i += cp->crc_size; dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off, i, DMA_FROM_DEVICE); memcpy(p, page_address(page->buffer) + off, i); dma_sync_single_for_device(&cp->pdev->dev, page->dma_addr + off, i, DMA_FROM_DEVICE); if (p == (char *) skb->data) /* not split */ RX_USED_ADD(page, cp->mtu_stride); else RX_USED_ADD(page, i); /* any more data? */ if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { p += hlen; i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr, dlen + cp->crc_size, DMA_FROM_DEVICE); memcpy(p, page_address(page->buffer), dlen + cp->crc_size); dma_sync_single_for_device(&cp->pdev->dev, page->dma_addr, dlen + cp->crc_size, DMA_FROM_DEVICE); RX_USED_ADD(page, dlen + cp->crc_size); } end_copy_pkt: if (cp->crc_size) crcaddr = skb->data + alloclen; skb_put(skb, alloclen); } csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3])); if (cp->crc_size) { /* checksum includes FCS. strip it out. */ csum = csum_fold(csum_partial(crcaddr, cp->crc_size, csum_unfold(csum))); } skb->protocol = eth_type_trans(skb, cp->dev); if (skb->protocol == htons(ETH_P_IP)) { skb->csum = csum_unfold(~csum); skb->ip_summed = CHECKSUM_COMPLETE; } else skb_checksum_none_assert(skb); return len; } /* we can handle up to 64 rx flows at a time. we do the same thing * as nonreassm except that we batch up the buffers. * NOTE: we currently just treat each flow as a bunch of packets that * we pass up. a better way would be to coalesce the packets * into a jumbo packet. to do that, we need to do the following: * 1) the first packet will have a clean split between header and * data. save both. * 2) each time the next flow packet comes in, extend the * data length and merge the checksums. * 3) on flow release, fix up the header. * 4) make sure the higher layer doesn't care. * because packets get coalesced, we shouldn't run into fragment count * issues. */ static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words, struct sk_buff *skb) { int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1); struct sk_buff_head *flow = &cp->rx_flows[flowid]; /* this is protected at a higher layer, so no need to * do any additional locking here. stick the buffer * at the end. 
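When the completion word carries RX_COMP1_RELEASE_FLOW the queue is drained and every buffered skb is passed up in arrival order.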
*/ __skb_queue_tail(flow, skb); if (words[0] & RX_COMP1_RELEASE_FLOW) { while ((skb = __skb_dequeue(flow))) { cas_skb_release(skb); } } } /* put rx descriptor back on ring. if a buffer is in use by a higher * layer, this will need to put in a replacement. */ static void cas_post_page(struct cas *cp, const int ring, const int index) { cas_page_t *new; int entry; entry = cp->rx_old[ring]; new = cas_page_swap(cp, ring, index); cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr); cp->init_rxds[ring][entry].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) | CAS_BASE(RX_INDEX_RING, ring)); entry = RX_DESC_ENTRY(ring, entry + 1); cp->rx_old[ring] = entry; if (entry % 4) return; if (ring == 0) writel(entry, cp->regs + REG_RX_KICK); else if ((N_RX_DESC_RINGS > 1) && (cp->cas_flags & CAS_FLAG_REG_PLUS)) writel(entry, cp->regs + REG_PLUS_RX_KICK1); } /* only when things are bad */ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) { unsigned int entry, last, count, released; int cluster; cas_page_t **page = cp->rx_pages[ring]; entry = cp->rx_old[ring]; netif_printk(cp, intr, KERN_DEBUG, cp->dev, "rxd[%d] interrupt, done: %d\n", ring, entry); cluster = -1; count = entry & 0x3; last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4); released = 0; while (entry != last) { /* make a new buffer if it's still in use */ if (page_count(page[entry]->buffer) > 1) { cas_page_t *new = cas_page_dequeue(cp); if (!new) { /* let the timer know that we need to * do this again */ cp->cas_flags |= CAS_FLAG_RXD_POST(ring); if (!timer_pending(&cp->link_timer)) mod_timer(&cp->link_timer, jiffies + CAS_LINK_FAST_TIMEOUT); cp->rx_old[ring] = entry; cp->rx_last[ring] = num ? num - released : 0; return -ENOMEM; } spin_lock(&cp->rx_inuse_lock); list_add(&page[entry]->list, &cp->rx_inuse_list); spin_unlock(&cp->rx_inuse_lock); cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr); page[entry] = new; } if (++count == 4) { cluster = entry; count = 0; } released++; entry = RX_DESC_ENTRY(ring, entry + 1); } cp->rx_old[ring] = entry; if (cluster < 0) return 0; if (ring == 0) writel(cluster, cp->regs + REG_RX_KICK); else if ((N_RX_DESC_RINGS > 1) && (cp->cas_flags & CAS_FLAG_REG_PLUS)) writel(cluster, cp->regs + REG_PLUS_RX_KICK1); return 0; } /* process a completion ring. packets are set up in three basic ways: * small packets: should be copied header + data in single buffer. * large packets: header and data in a single buffer. * split packets: header in a separate buffer from data. * data may be in multiple pages. data may be > 256 * bytes but in a single page. * * NOTE: RX page posting is done in this routine as well. while there's * the capability of using multiple RX completion rings, it isn't * really worthwhile due to the fact that the page posting will * force serialization on the single descriptor ring. 
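A budget of zero (the non-NAPI interrupt path) processes completions until one is found that the hardware still owns; with NAPI compiled in, the loop additionally stops once npackets reaches the budget.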
*/ static int cas_rx_ringN(struct cas *cp, int ring, int budget) { struct cas_rx_comp *rxcs = cp->init_rxcs[ring]; int entry, drops; int npackets = 0; netif_printk(cp, intr, KERN_DEBUG, cp->dev, "rx[%d] interrupt, done: %d/%d\n", ring, readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]); entry = cp->rx_new[ring]; drops = 0; while (1) { struct cas_rx_comp *rxc = rxcs + entry; struct sk_buff *skb; int type, len; u64 words[4]; int i, dring; words[0] = le64_to_cpu(rxc->word1); words[1] = le64_to_cpu(rxc->word2); words[2] = le64_to_cpu(rxc->word3); words[3] = le64_to_cpu(rxc->word4); /* don't touch if still owned by hw */ type = CAS_VAL(RX_COMP1_TYPE, words[0]); if (type == 0) break; /* hw hasn't cleared the zero bit yet */ if (words[3] & RX_COMP4_ZERO) { break; } /* get info on the packet */ if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) { spin_lock(&cp->stat_lock[ring]); cp->net_stats[ring].rx_errors++; if (words[3] & RX_COMP4_LEN_MISMATCH) cp->net_stats[ring].rx_length_errors++; if (words[3] & RX_COMP4_BAD) cp->net_stats[ring].rx_crc_errors++; spin_unlock(&cp->stat_lock[ring]); /* We'll just return it to Cassini. */ drop_it: spin_lock(&cp->stat_lock[ring]); ++cp->net_stats[ring].rx_dropped; spin_unlock(&cp->stat_lock[ring]); goto next; } len = cas_rx_process_pkt(cp, rxc, entry, words, &skb); if (len < 0) { ++drops; goto drop_it; } /* see if it's a flow re-assembly or not. the driver * itself handles release back up. */ if (RX_DONT_BATCH || (type == 0x2)) { /* non-reassm: these always get released */ cas_skb_release(skb); } else { cas_rx_flow_pkt(cp, words, skb); } spin_lock(&cp->stat_lock[ring]); cp->net_stats[ring].rx_packets++; cp->net_stats[ring].rx_bytes += len; spin_unlock(&cp->stat_lock[ring]); next: npackets++; /* should it be released? */ if (words[0] & RX_COMP1_RELEASE_HDR) { i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); dring = CAS_VAL(RX_INDEX_RING, i); i = CAS_VAL(RX_INDEX_NUM, i); cas_post_page(cp, dring, i); } if (words[0] & RX_COMP1_RELEASE_DATA) { i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); dring = CAS_VAL(RX_INDEX_RING, i); i = CAS_VAL(RX_INDEX_NUM, i); cas_post_page(cp, dring, i); } if (words[0] & RX_COMP1_RELEASE_NEXT) { i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); dring = CAS_VAL(RX_INDEX_RING, i); i = CAS_VAL(RX_INDEX_NUM, i); cas_post_page(cp, dring, i); } /* skip to the next entry */ entry = RX_COMP_ENTRY(ring, entry + 1 + CAS_VAL(RX_COMP1_SKIP, words[0])); #ifdef USE_NAPI if (budget && (npackets >= budget)) break; #endif } cp->rx_new[ring] = entry; if (drops) netdev_info(cp->dev, "Memory squeeze, deferring packet\n"); return npackets; } /* put completion entries back on the ring */ static void cas_post_rxcs_ringN(struct net_device *dev, struct cas *cp, int ring) { struct cas_rx_comp *rxc = cp->init_rxcs[ring]; int last, entry; last = cp->rx_cur[ring]; entry = cp->rx_new[ring]; netif_printk(cp, intr, KERN_DEBUG, dev, "rxc[%d] interrupt, done: %d/%d\n", ring, readl(cp->regs + REG_RX_COMP_HEAD), entry); /* zero and re-mark descriptors */ while (last != entry) { cas_rxc_init(rxc + last); last = RX_COMP_ENTRY(ring, last + 1); } cp->rx_cur[ring] = last; if (ring == 0) writel(last, cp->regs + REG_RX_COMP_TAIL); else if (cp->cas_flags & CAS_FLAG_REG_PLUS) writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring)); } /* cassini can use all four PCI interrupts for the completion ring. 
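the handlers below cover INTC and INTD;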
* rings 3 and 4 are identical */ #if defined(USE_PCI_INTC) || defined(USE_PCI_INTD) static inline void cas_handle_irqN(struct net_device *dev, struct cas *cp, const u32 status, const int ring) { if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT)) cas_post_rxcs_ringN(dev, cp, ring); } static irqreturn_t cas_interruptN(int irq, void *dev_id) { struct net_device *dev = dev_id; struct cas *cp = netdev_priv(dev); unsigned long flags; int ring = (irq == cp->pci_irq_INTC) ? 2 : 3; u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); /* check for shared irq */ if (status == 0) return IRQ_NONE; spin_lock_irqsave(&cp->lock, flags); if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ #ifdef USE_NAPI cas_mask_intr(cp); napi_schedule(&cp->napi); #else cas_rx_ringN(cp, ring, 0); #endif status &= ~INTR_RX_DONE_ALT; } if (status) cas_handle_irqN(dev, cp, status, ring); spin_unlock_irqrestore(&cp->lock, flags); return IRQ_HANDLED; } #endif #ifdef USE_PCI_INTB /* everything but rx packets */ static inline void cas_handle_irq1(struct cas *cp, const u32 status) { if (status & INTR_RX_BUF_UNAVAIL_1) { /* Frame arrived, no free RX buffers available. * NOTE: we can get this on a link transition. */ cas_post_rxds_ringN(cp, 1, 0); spin_lock(&cp->stat_lock[1]); cp->net_stats[1].rx_dropped++; spin_unlock(&cp->stat_lock[1]); } if (status & INTR_RX_BUF_AE_1) cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) - RX_AE_FREEN_VAL(1)); if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) cas_post_rxcs_ringN(cp, 1); } /* ring 2 handles a few more events than 3 and 4 */ static irqreturn_t cas_interrupt1(int irq, void *dev_id) { struct net_device *dev = dev_id; struct cas *cp = netdev_priv(dev); unsigned long flags; u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); /* check for shared interrupt */ if (status == 0) return IRQ_NONE; spin_lock_irqsave(&cp->lock, flags); if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ #ifdef USE_NAPI cas_mask_intr(cp); napi_schedule(&cp->napi); #else cas_rx_ringN(cp, 1, 0); #endif status &= ~INTR_RX_DONE_ALT; } if (status) cas_handle_irq1(cp, status); spin_unlock_irqrestore(&cp->lock, flags); return IRQ_HANDLED; } #endif static inline void cas_handle_irq(struct net_device *dev, struct cas *cp, const u32 status) { /* housekeeping interrupts */ if (status & INTR_ERROR_MASK) cas_abnormal_irq(dev, cp, status); if (status & INTR_RX_BUF_UNAVAIL) { /* Frame arrived, no free RX buffers available. * NOTE: we can get this on a link transition. 
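The frame itself has already been lost by this point, so all that remains is to refill the descriptor ring and account for the drop.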
*/ cas_post_rxds_ringN(cp, 0, 0); spin_lock(&cp->stat_lock[0]); cp->net_stats[0].rx_dropped++; spin_unlock(&cp->stat_lock[0]); } else if (status & INTR_RX_BUF_AE) { cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) - RX_AE_FREEN_VAL(0)); } if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) cas_post_rxcs_ringN(dev, cp, 0); } static irqreturn_t cas_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct cas *cp = netdev_priv(dev); unsigned long flags; u32 status = readl(cp->regs + REG_INTR_STATUS); if (status == 0) return IRQ_NONE; spin_lock_irqsave(&cp->lock, flags); if (status & (INTR_TX_ALL | INTR_TX_INTME)) { cas_tx(dev, cp, status); status &= ~(INTR_TX_ALL | INTR_TX_INTME); } if (status & INTR_RX_DONE) { #ifdef USE_NAPI cas_mask_intr(cp); napi_schedule(&cp->napi); #else cas_rx_ringN(cp, 0, 0); #endif status &= ~INTR_RX_DONE; } if (status) cas_handle_irq(dev, cp, status); spin_unlock_irqrestore(&cp->lock, flags); return IRQ_HANDLED; } #ifdef USE_NAPI static int cas_poll(struct napi_struct *napi, int budget) { struct cas *cp = container_of(napi, struct cas, napi); struct net_device *dev = cp->dev; int i, enable_intr, credits; u32 status = readl(cp->regs + REG_INTR_STATUS); unsigned long flags; spin_lock_irqsave(&cp->lock, flags); cas_tx(dev, cp, status); spin_unlock_irqrestore(&cp->lock, flags); /* NAPI rx packets. we spread the credits across all of the * rxc rings * * to make sure we're fair with the work we loop through each * ring N_RX_COMP_RING times with a request of * budget / N_RX_COMP_RINGS */ enable_intr = 1; credits = 0; for (i = 0; i < N_RX_COMP_RINGS; i++) { int j; for (j = 0; j < N_RX_COMP_RINGS; j++) { credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS); if (credits >= budget) { enable_intr = 0; goto rx_comp; } } } rx_comp: /* final rx completion */ spin_lock_irqsave(&cp->lock, flags); if (status) cas_handle_irq(dev, cp, status); #ifdef USE_PCI_INTB if (N_RX_COMP_RINGS > 1) { status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); if (status) cas_handle_irq1(dev, cp, status); } #endif #ifdef USE_PCI_INTC if (N_RX_COMP_RINGS > 2) { status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2)); if (status) cas_handle_irqN(dev, cp, status, 2); } #endif #ifdef USE_PCI_INTD if (N_RX_COMP_RINGS > 3) { status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3)); if (status) cas_handle_irqN(dev, cp, status, 3); } #endif spin_unlock_irqrestore(&cp->lock, flags); if (enable_intr) { napi_complete(napi); cas_unmask_intr(cp); } return credits; } #endif #ifdef CONFIG_NET_POLL_CONTROLLER static void cas_netpoll(struct net_device *dev) { struct cas *cp = netdev_priv(dev); cas_disable_irq(cp, 0); cas_interrupt(cp->pdev->irq, dev); cas_enable_irq(cp, 0); #ifdef USE_PCI_INTB if (N_RX_COMP_RINGS > 1) { /* cas_interrupt1(); */ } #endif #ifdef USE_PCI_INTC if (N_RX_COMP_RINGS > 2) { /* cas_interruptN(); */ } #endif #ifdef USE_PCI_INTD if (N_RX_COMP_RINGS > 3) { /* cas_interruptN(); */ } #endif } #endif static void cas_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct cas *cp = netdev_priv(dev); netdev_err(dev, "transmit timed out, resetting\n"); if (!cp->hw_running) { netdev_err(dev, "hrm.. 
hw not running!\n"); return; } netdev_err(dev, "MIF_STATE[%08x]\n", readl(cp->regs + REG_MIF_STATE_MACHINE)); netdev_err(dev, "MAC_STATE[%08x]\n", readl(cp->regs + REG_MAC_STATE_MACHINE)); netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n", readl(cp->regs + REG_TX_CFG), readl(cp->regs + REG_MAC_TX_STATUS), readl(cp->regs + REG_MAC_TX_CFG), readl(cp->regs + REG_TX_FIFO_PKT_CNT), readl(cp->regs + REG_TX_FIFO_WRITE_PTR), readl(cp->regs + REG_TX_FIFO_READ_PTR), readl(cp->regs + REG_TX_SM_1), readl(cp->regs + REG_TX_SM_2)); netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n", readl(cp->regs + REG_RX_CFG), readl(cp->regs + REG_MAC_RX_STATUS), readl(cp->regs + REG_MAC_RX_CFG)); netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n", readl(cp->regs + REG_HP_STATE_MACHINE), readl(cp->regs + REG_HP_STATUS0), readl(cp->regs + REG_HP_STATUS1), readl(cp->regs + REG_HP_STATUS2)); #if 1 atomic_inc(&cp->reset_task_pending); atomic_inc(&cp->reset_task_pending_all); schedule_work(&cp->reset_task); #else atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); schedule_work(&cp->reset_task); #endif } static inline int cas_intme(int ring, int entry) { /* Algorithm: IRQ every 1/2 of descriptors. */ if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1))) return 1; return 0; } static void cas_write_txd(struct cas *cp, int ring, int entry, dma_addr_t mapping, int len, u64 ctrl, int last) { struct cas_tx_desc *txd = cp->init_txds[ring] + entry; ctrl |= CAS_BASE(TX_DESC_BUFLEN, len); if (cas_intme(ring, entry)) ctrl |= TX_DESC_INTME; if (last) ctrl |= TX_DESC_EOF; txd->control = cpu_to_le64(ctrl); txd->buffer = cpu_to_le64(mapping); } static inline void *tx_tiny_buf(struct cas *cp, const int ring, const int entry) { return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry; } static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, const int entry, const int tentry) { cp->tx_tiny_use[ring][tentry].nbufs++; cp->tx_tiny_use[ring][entry].used = 1; return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry; } static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, struct sk_buff *skb) { struct net_device *dev = cp->dev; int entry, nr_frags, frag, tabort, tentry; dma_addr_t mapping; unsigned long flags; u64 ctrl; u32 len; spin_lock_irqsave(&cp->tx_lock[ring], flags); /* This is a hard error, log it. */ if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) { netif_stop_queue(dev); spin_unlock_irqrestore(&cp->tx_lock[ring], flags); netdev_err(dev, "BUG! 
Tx Ring full when queue awake!\n"); return 1; } ctrl = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { const u64 csum_start_off = skb_checksum_start_offset(skb); const u64 csum_stuff_off = csum_start_off + skb->csum_offset; ctrl = TX_DESC_CSUM_EN | CAS_BASE(TX_DESC_CSUM_START, csum_start_off) | CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off); } entry = cp->tx_new[ring]; cp->tx_skbs[ring][entry] = skb; nr_frags = skb_shinfo(skb)->nr_frags; len = skb_headlen(skb); mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data), offset_in_page(skb->data), len, DMA_TO_DEVICE); tentry = entry; tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len); if (unlikely(tabort)) { /* NOTE: len is always > tabort */ cas_write_txd(cp, ring, entry, mapping, len - tabort, ctrl | TX_DESC_SOF, 0); entry = TX_DESC_NEXT(ring, entry); skb_copy_from_linear_data_offset(skb, len - tabort, tx_tiny_buf(cp, ring, entry), tabort); mapping = tx_tiny_map(cp, ring, entry, tentry); cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, (nr_frags == 0)); } else { cas_write_txd(cp, ring, entry, mapping, len, ctrl | TX_DESC_SOF, (nr_frags == 0)); } entry = TX_DESC_NEXT(ring, entry); for (frag = 0; frag < nr_frags; frag++) { const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; len = skb_frag_size(fragp); mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len, DMA_TO_DEVICE); tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len); if (unlikely(tabort)) { /* NOTE: len is always > tabort */ cas_write_txd(cp, ring, entry, mapping, len - tabort, ctrl, 0); entry = TX_DESC_NEXT(ring, entry); memcpy_from_page(tx_tiny_buf(cp, ring, entry), skb_frag_page(fragp), skb_frag_off(fragp) + len - tabort, tabort); mapping = tx_tiny_map(cp, ring, entry, tentry); len = tabort; } cas_write_txd(cp, ring, entry, mapping, len, ctrl, (frag + 1 == nr_frags)); entry = TX_DESC_NEXT(ring, entry); } cp->tx_new[ring] = entry; if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) netif_stop_queue(dev); netif_printk(cp, tx_queued, KERN_DEBUG, dev, "tx[%d] queued, slot %d, skblen %d, avail %d\n", ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring)); writel(entry, cp->regs + REG_TX_KICKN(ring)); spin_unlock_irqrestore(&cp->tx_lock[ring], flags); return 0; } static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct cas *cp = netdev_priv(dev); /* this is only used as a load-balancing hint, so it doesn't * need to be SMP safe */ static int ring; if (skb_padto(skb, cp->min_frame_size)) return NETDEV_TX_OK; /* XXX: we need some higher-level QoS hooks to steer packets to * individual queues. */ if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb)) return NETDEV_TX_BUSY; return NETDEV_TX_OK; } static void cas_init_tx_dma(struct cas *cp) { u64 desc_dma = cp->block_dvma; unsigned long off; u32 val; int i; /* set up tx completion writeback registers. 
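The chip writes the last-completed descriptor index for each ring back into this block, so cas_tx() can pick up completion state without a per-ring register read. The writeback address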
must be 8-byte aligned */ #ifdef USE_TX_COMPWB off = offsetof(struct cas_init_block, tx_compwb); writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI); writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW); #endif /* enable completion writebacks, enable paced mode, * disable read pipe, and disable pre-interrupt compwbs */ val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 | TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 | TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE | TX_CFG_INTR_COMPWB_DIS; /* write out tx ring info and tx desc bases */ for (i = 0; i < MAX_TX_RINGS; i++) { off = (unsigned long) cp->init_txds[i] - (unsigned long) cp->init_block; val |= CAS_TX_RINGN_BASE(i); writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i)); writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_DBN_LOW(i)); /* don't zero out the kick register here as the system * will wedge */ } writel(val, cp->regs + REG_TX_CFG); /* program max burst sizes. these numbers should be different * if doing QoS. */ #ifdef USE_QOS writel(0x800, cp->regs + REG_TX_MAXBURST_0); writel(0x1600, cp->regs + REG_TX_MAXBURST_1); writel(0x2400, cp->regs + REG_TX_MAXBURST_2); writel(0x4800, cp->regs + REG_TX_MAXBURST_3); #else writel(0x800, cp->regs + REG_TX_MAXBURST_0); writel(0x800, cp->regs + REG_TX_MAXBURST_1); writel(0x800, cp->regs + REG_TX_MAXBURST_2); writel(0x800, cp->regs + REG_TX_MAXBURST_3); #endif } /* Must be invoked under cp->lock. */ static inline void cas_init_dma(struct cas *cp) { cas_init_tx_dma(cp); cas_init_rx_dma(cp); } static void cas_process_mc_list(struct cas *cp) { u16 hash_table[16]; u32 crc; struct netdev_hw_addr *ha; int i = 1; memset(hash_table, 0, sizeof(hash_table)); netdev_for_each_mc_addr(ha, cp->dev) { if (i <= CAS_MC_EXACT_MATCH_SIZE) { /* use the alternate mac address registers for the * first 15 multicast addresses */ writel((ha->addr[4] << 8) | ha->addr[5], cp->regs + REG_MAC_ADDRN(i*3 + 0)); writel((ha->addr[2] << 8) | ha->addr[3], cp->regs + REG_MAC_ADDRN(i*3 + 1)); writel((ha->addr[0] << 8) | ha->addr[1], cp->regs + REG_MAC_ADDRN(i*3 + 2)); i++; } else { /* use hw hash table for the next series of * multicast addresses */ crc = ether_crc_le(ETH_ALEN, ha->addr); crc >>= 24; hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); } } for (i = 0; i < 16; i++) writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i)); } /* Must be invoked under cp->lock. 
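Returns the MAC_RX_CFG filter bits (promiscuous or hash-filter enable); cas_init_mac() keeps the result in cp->mac_rx_cfg.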
*/ static u32 cas_setup_multicast(struct cas *cp) { u32 rxcfg = 0; int i; if (cp->dev->flags & IFF_PROMISC) { rxcfg |= MAC_RX_CFG_PROMISC_EN; } else if (cp->dev->flags & IFF_ALLMULTI) { for (i=0; i < 16; i++) writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i)); rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; } else { cas_process_mc_list(cp); rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; } return rxcfg; } /* must be invoked under cp->stat_lock[N_TX_RINGS] */ static void cas_clear_mac_err(struct cas *cp) { writel(0, cp->regs + REG_MAC_COLL_NORMAL); writel(0, cp->regs + REG_MAC_COLL_FIRST); writel(0, cp->regs + REG_MAC_COLL_EXCESS); writel(0, cp->regs + REG_MAC_COLL_LATE); writel(0, cp->regs + REG_MAC_TIMER_DEFER); writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK); writel(0, cp->regs + REG_MAC_RECV_FRAME); writel(0, cp->regs + REG_MAC_LEN_ERR); writel(0, cp->regs + REG_MAC_ALIGN_ERR); writel(0, cp->regs + REG_MAC_FCS_ERR); writel(0, cp->regs + REG_MAC_RX_CODE_ERR); } static void cas_mac_reset(struct cas *cp) { int i; /* do both TX and RX reset */ writel(0x1, cp->regs + REG_MAC_TX_RESET); writel(0x1, cp->regs + REG_MAC_RX_RESET); /* wait for TX */ i = STOP_TRIES; while (i-- > 0) { if (readl(cp->regs + REG_MAC_TX_RESET) == 0) break; udelay(10); } /* wait for RX */ i = STOP_TRIES; while (i-- > 0) { if (readl(cp->regs + REG_MAC_RX_RESET) == 0) break; udelay(10); } if (readl(cp->regs + REG_MAC_TX_RESET) | readl(cp->regs + REG_MAC_RX_RESET)) netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n", readl(cp->regs + REG_MAC_TX_RESET), readl(cp->regs + REG_MAC_RX_RESET), readl(cp->regs + REG_MAC_STATE_MACHINE)); } /* Must be invoked under cp->lock. */ static void cas_init_mac(struct cas *cp) { const unsigned char *e = &cp->dev->dev_addr[0]; int i; cas_mac_reset(cp); /* setup core arbitration weight register */ writel(CAWR_RR_DIS, cp->regs + REG_CAWR); #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) /* set the infinite burst register for chips that don't have * pci issues. */ if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0) writel(INF_BURST_EN, cp->regs + REG_INF_BURST); #endif writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE); writel(0x00, cp->regs + REG_MAC_IPG0); writel(0x08, cp->regs + REG_MAC_IPG1); writel(0x04, cp->regs + REG_MAC_IPG2); /* change later for 802.3z */ writel(0x40, cp->regs + REG_MAC_SLOT_TIME); /* min frame + FCS */ writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN); /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we * specify the maximum frame size to prevent RX tag errors on * oversized frames. */ writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) | CAS_BASE(MAC_FRAMESIZE_MAX_FRAME, (CAS_MAX_MTU + ETH_HLEN + 4 + 4)), cp->regs + REG_MAC_FRAMESIZE_MAX); /* NOTE: crc_size is used as a surrogate for half-duplex. * workaround saturn half-duplex issue by increasing preamble * size to 65 bytes. 
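(crc_size is set to 4 by cas_set_link_modes() when the link comes up half-duplex and cleared again for full duplex, so the longer 0x41 (65-byte) preamble is applied only in that case; everything else keeps the normal 0x07.)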
*/ if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size) writel(0x41, cp->regs + REG_MAC_PA_SIZE); else writel(0x07, cp->regs + REG_MAC_PA_SIZE); writel(0x04, cp->regs + REG_MAC_JAM_SIZE); writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT); writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE); writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED); writel(0, cp->regs + REG_MAC_ADDR_FILTER0); writel(0, cp->regs + REG_MAC_ADDR_FILTER1); writel(0, cp->regs + REG_MAC_ADDR_FILTER2); writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK); writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK); /* setup mac address in perfect filter array */ for (i = 0; i < 45; i++) writel(0x0, cp->regs + REG_MAC_ADDRN(i)); writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0)); writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1)); writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2)); writel(0x0001, cp->regs + REG_MAC_ADDRN(42)); writel(0xc200, cp->regs + REG_MAC_ADDRN(43)); writel(0x0180, cp->regs + REG_MAC_ADDRN(44)); cp->mac_rx_cfg = cas_setup_multicast(cp); spin_lock(&cp->stat_lock[N_TX_RINGS]); cas_clear_mac_err(cp); spin_unlock(&cp->stat_lock[N_TX_RINGS]); /* Setup MAC interrupts. We want to get all of the interesting * counter expiration events, but we do not want to hear about * normal rx/tx as the DMA engine tells us that. */ writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK); writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); /* Don't enable even the PAUSE interrupts for now, we * make no use of those events other than to record them. */ writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK); } /* Must be invoked under cp->lock. */ static void cas_init_pause_thresholds(struct cas *cp) { /* Calculate pause thresholds. Setting the OFF threshold to the * full RX fifo size effectively disables PAUSE generation */ if (cp->rx_fifo_size <= (2 * 1024)) { cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size; } else { int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63; if (max_frame * 3 > cp->rx_fifo_size) { cp->rx_pause_off = 7104; cp->rx_pause_on = 960; } else { int off = (cp->rx_fifo_size - (max_frame * 2)); int on = off - max_frame; cp->rx_pause_off = off; cp->rx_pause_on = on; } } } static int cas_vpd_match(const void __iomem *p, const char *str) { int len = strlen(str) + 1; int i; for (i = 0; i < len; i++) { if (readb(p + i) != str[i]) return 0; } return 1; } /* get the mac address by reading the vpd information in the rom. * also get the phy type and determine if there's an entropy generator. * NOTE: this is a bit convoluted for the following reasons: * 1) vpd info has order-dependent mac addresses for multinic cards * 2) the only way to determine the nic order is to use the slot * number. * 3) fiber cards don't have bridges, so their slot numbers don't * mean anything. * 4) we don't actually know we have a fiber card until after * the mac addresses are parsed. 
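The return value is the detected phy type; dev_addr is filled in as a side effect, with a fallback to a random MAC using the Sun prefix when no usable VPD entry is found.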
*/ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, const int offset) { void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START; void __iomem *base, *kstart; int i, len; int found = 0; #define VPD_FOUND_MAC 0x01 #define VPD_FOUND_PHY 0x02 int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ int mac_off = 0; #if defined(CONFIG_SPARC) const unsigned char *addr; #endif /* give us access to the PROM */ writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD, cp->regs + REG_BIM_LOCAL_DEV_EN); /* check for an expansion rom */ if (readb(p) != 0x55 || readb(p + 1) != 0xaa) goto use_random_mac_addr; /* search for beginning of vpd */ base = NULL; for (i = 2; i < EXPANSION_ROM_SIZE; i++) { /* check for PCIR */ if ((readb(p + i + 0) == 0x50) && (readb(p + i + 1) == 0x43) && (readb(p + i + 2) == 0x49) && (readb(p + i + 3) == 0x52)) { base = p + (readb(p + i + 8) | (readb(p + i + 9) << 8)); break; } } if (!base || (readb(base) != 0x82)) goto use_random_mac_addr; i = (readb(base + 1) | (readb(base + 2) << 8)) + 3; while (i < EXPANSION_ROM_SIZE) { if (readb(base + i) != 0x90) /* no vpd found */ goto use_random_mac_addr; /* found a vpd field */ len = readb(base + i + 1) | (readb(base + i + 2) << 8); /* extract keywords */ kstart = base + i + 3; p = kstart; while ((p - kstart) < len) { int klen = readb(p + 2); int j; char type; p += 3; /* look for the following things: * -- correct length == 29 * 3 (type) + 2 (size) + * 18 (strlen("local-mac-address") + 1) + * 6 (mac addr) * -- VPD Instance 'I' * -- VPD Type Bytes 'B' * -- VPD data length == 6 * -- property string == local-mac-address * * -- correct length == 24 * 3 (type) + 2 (size) + * 12 (strlen("entropy-dev") + 1) + * 7 (strlen("vms110") + 1) * -- VPD Instance 'I' * -- VPD Type String 'B' * -- VPD data length == 7 * -- property string == entropy-dev * * -- correct length == 18 * 3 (type) + 2 (size) + * 9 (strlen("phy-type") + 1) + * 4 (strlen("pcs") + 1) * -- VPD Instance 'I' * -- VPD Type String 'S' * -- VPD data length == 4 * -- property string == phy-type * * -- correct length == 23 * 3 (type) + 2 (size) + * 14 (strlen("phy-interface") + 1) + * 4 (strlen("pcs") + 1) * -- VPD Instance 'I' * -- VPD Type String 'S' * -- VPD data length == 4 * -- property string == phy-interface */ if (readb(p) != 'I') goto next; /* finally, check string and length */ type = readb(p + 3); if (type == 'B') { if ((klen == 29) && readb(p + 4) == 6 && cas_vpd_match(p + 5, "local-mac-address")) { if (mac_off++ > offset) goto next; /* set mac address */ for (j = 0; j < 6; j++) dev_addr[j] = readb(p + 23 + j); goto found_mac; } } if (type != 'S') goto next; #ifdef USE_ENTROPY_DEV if ((klen == 24) && cas_vpd_match(p + 5, "entropy-dev") && cas_vpd_match(p + 17, "vms110")) { cp->cas_flags |= CAS_FLAG_ENTROPY_DEV; goto next; } #endif if (found & VPD_FOUND_PHY) goto next; if ((klen == 18) && readb(p + 4) == 4 && cas_vpd_match(p + 5, "phy-type")) { if (cas_vpd_match(p + 14, "pcs")) { phy_type = CAS_PHY_SERDES; goto found_phy; } } if ((klen == 23) && readb(p + 4) == 4 && cas_vpd_match(p + 5, "phy-interface")) { if (cas_vpd_match(p + 19, "pcs")) { phy_type = CAS_PHY_SERDES; goto found_phy; } } found_mac: found |= VPD_FOUND_MAC; goto next; found_phy: found |= VPD_FOUND_PHY; next: p += klen; } i += len + 3; } use_random_mac_addr: if (found & VPD_FOUND_MAC) goto done; #if defined(CONFIG_SPARC) addr = of_get_property(cp->of_node, "local-mac-address", NULL); if (addr != NULL) { memcpy(dev_addr, addr, ETH_ALEN); goto done; } #endif /* Sun MAC prefix then 3 random bytes. 
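(08:00:20 is the Sun Microsystems OUI.)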
*/ pr_info("MAC address not found in ROM VPD\n"); dev_addr[0] = 0x08; dev_addr[1] = 0x00; dev_addr[2] = 0x20; get_random_bytes(dev_addr + 3, 3); done: writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN); return phy_type; } /* check pci invariants */ static void cas_check_pci_invariants(struct cas *cp) { struct pci_dev *pdev = cp->pdev; cp->cas_flags = 0; if ((pdev->vendor == PCI_VENDOR_ID_SUN) && (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) { if (pdev->revision >= CAS_ID_REVPLUS) cp->cas_flags |= CAS_FLAG_REG_PLUS; if (pdev->revision < CAS_ID_REVPLUS02u) cp->cas_flags |= CAS_FLAG_TARGET_ABORT; /* Original Cassini supports HW CSUM, but it's not * enabled by default as it can trigger TX hangs. */ if (pdev->revision < CAS_ID_REV2) cp->cas_flags |= CAS_FLAG_NO_HW_CSUM; } else { /* Only sun has original cassini chips. */ cp->cas_flags |= CAS_FLAG_REG_PLUS; /* We use a flag because the same phy might be externally * connected. */ if ((pdev->vendor == PCI_VENDOR_ID_NS) && (pdev->device == PCI_DEVICE_ID_NS_SATURN)) cp->cas_flags |= CAS_FLAG_SATURN; } } static int cas_check_invariants(struct cas *cp) { struct pci_dev *pdev = cp->pdev; u8 addr[ETH_ALEN]; u32 cfg; int i; /* get page size for rx buffers. */ cp->page_order = 0; #ifdef USE_PAGE_ORDER if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) { /* see if we can allocate larger pages */ struct page *page = alloc_pages(GFP_ATOMIC, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT); if (page) { __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT); cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT; } else { printk("MTU limited to %d bytes\n", CAS_MAX_MTU); } } #endif cp->page_size = (PAGE_SIZE << cp->page_order); /* Fetch the FIFO configurations. */ cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64; cp->rx_fifo_size = RX_FIFO_SIZE; /* finish phy determination. MDIO1 takes precedence over MDIO0 if * they're both connected. */ cp->phy_type = cas_get_vpd_info(cp, addr, PCI_SLOT(pdev->devfn)); eth_hw_addr_set(cp->dev, addr); if (cp->phy_type & CAS_PHY_SERDES) { cp->cas_flags |= CAS_FLAG_1000MB_CAP; return 0; /* no more checking needed */ } /* MII */ cfg = readl(cp->regs + REG_MIF_CFG); if (cfg & MIF_CFG_MDIO_1) { cp->phy_type = CAS_PHY_MII_MDIO1; } else if (cfg & MIF_CFG_MDIO_0) { cp->phy_type = CAS_PHY_MII_MDIO0; } cas_mif_poll(cp, 0); writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); for (i = 0; i < 32; i++) { u32 phy_id; int j; for (j = 0; j < 3; j++) { cp->phy_addr = i; phy_id = cas_phy_read(cp, MII_PHYSID1) << 16; phy_id |= cas_phy_read(cp, MII_PHYSID2); if (phy_id && (phy_id != 0xFFFFFFFF)) { cp->phy_id = phy_id; goto done; } } } pr_err("MII phy did not respond [%08x]\n", readl(cp->regs + REG_MIF_STATE_MACHINE)); return -1; done: /* see if we can do gigabit */ cfg = cas_phy_read(cp, MII_BMSR); if ((cfg & CAS_BMSR_1000_EXTEND) && cas_phy_read(cp, CAS_MII_1000_EXTEND)) cp->cas_flags |= CAS_FLAG_1000MB_CAP; return 0; } /* Must be invoked under cp->lock. 
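Enables the TX and RX DMA engines and the MAC, polls up to STOP_TRIES times for the MAC enable bits to latch, then unmasks interrupts and primes the RX kick and completion tail registers.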
*/ static inline void cas_start_dma(struct cas *cp) { int i; u32 val; int txfailed = 0; /* enable dma */ val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN; writel(val, cp->regs + REG_TX_CFG); val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN; writel(val, cp->regs + REG_RX_CFG); /* enable the mac */ val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN; writel(val, cp->regs + REG_MAC_TX_CFG); val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN; writel(val, cp->regs + REG_MAC_RX_CFG); i = STOP_TRIES; while (i-- > 0) { val = readl(cp->regs + REG_MAC_TX_CFG); if ((val & MAC_TX_CFG_EN)) break; udelay(10); } if (i < 0) txfailed = 1; i = STOP_TRIES; while (i-- > 0) { val = readl(cp->regs + REG_MAC_RX_CFG); if ((val & MAC_RX_CFG_EN)) { if (txfailed) { netdev_err(cp->dev, "enabling mac failed [tx:%08x:%08x]\n", readl(cp->regs + REG_MIF_STATE_MACHINE), readl(cp->regs + REG_MAC_STATE_MACHINE)); } goto enable_rx_done; } udelay(10); } netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n", (txfailed ? "tx,rx" : "rx"), readl(cp->regs + REG_MIF_STATE_MACHINE), readl(cp->regs + REG_MAC_STATE_MACHINE)); enable_rx_done: cas_unmask_intr(cp); /* enable interrupts */ writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); writel(0, cp->regs + REG_RX_COMP_TAIL); if (cp->cas_flags & CAS_FLAG_REG_PLUS) { if (N_RX_DESC_RINGS > 1) writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs + REG_PLUS_RX_KICK1); } } /* Must be invoked under cp->lock. */ static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd, int *pause) { u32 val = readl(cp->regs + REG_PCS_MII_LPA); *fd = (val & PCS_MII_LPA_FD) ? 1 : 0; *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00; if (val & PCS_MII_LPA_ASYM_PAUSE) *pause |= 0x10; *spd = 1000; } /* Must be invoked under cp->lock. */ static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd, int *pause) { u32 val; *fd = 0; *spd = 10; *pause = 0; /* use GMII registers */ val = cas_phy_read(cp, MII_LPA); if (val & CAS_LPA_PAUSE) *pause = 0x01; if (val & CAS_LPA_ASYM_PAUSE) *pause |= 0x10; if (val & LPA_DUPLEX) *fd = 1; if (val & LPA_100) *spd = 100; if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { val = cas_phy_read(cp, CAS_MII_1000_STATUS); if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF)) *spd = 1000; if (val & CAS_LPA_1000FULL) *fd = 1; } } /* A link-up condition has occurred, initialize and enable the * rest of the chip. * * Must be invoked under cp->lock. */ static void cas_set_link_modes(struct cas *cp) { u32 val; int full_duplex, speed, pause; full_duplex = 0; speed = 10; pause = 0; if (CAS_PHY_MII(cp->phy_type)) { cas_mif_poll(cp, 0); val = cas_phy_read(cp, MII_BMCR); if (val & BMCR_ANENABLE) { cas_read_mii_link_mode(cp, &full_duplex, &speed, &pause); } else { if (val & BMCR_FULLDPLX) full_duplex = 1; if (val & BMCR_SPEED100) speed = 100; else if (val & CAS_BMCR_SPEED1000) speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? 1000 : 100; } cas_mif_poll(cp, 1); } else { val = readl(cp->regs + REG_PCS_MII_CTRL); cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause); if ((val & PCS_MII_AUTONEG_EN) == 0) { if (val & PCS_MII_CTRL_DUPLEX) full_duplex = 1; } } netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n", speed, full_duplex ? 
"full" : "half"); val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED; if (CAS_PHY_MII(cp->phy_type)) { val |= MAC_XIF_MII_BUFFER_OUTPUT_EN; if (!full_duplex) val |= MAC_XIF_DISABLE_ECHO; } if (full_duplex) val |= MAC_XIF_FDPLX_LED; if (speed == 1000) val |= MAC_XIF_GMII_MODE; writel(val, cp->regs + REG_MAC_XIF_CFG); /* deal with carrier and collision detect. */ val = MAC_TX_CFG_IPG_EN; if (full_duplex) { val |= MAC_TX_CFG_IGNORE_CARRIER; val |= MAC_TX_CFG_IGNORE_COLL; } else { #ifndef USE_CSMA_CD_PROTO val |= MAC_TX_CFG_NEVER_GIVE_UP_EN; val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM; #endif } /* val now set up for REG_MAC_TX_CFG */ /* If gigabit and half-duplex, enable carrier extension * mode. increase slot time to 512 bytes as well. * else, disable it and make sure slot time is 64 bytes. * also activate checksum bug workaround */ if ((speed == 1000) && !full_duplex) { writel(val | MAC_TX_CFG_CARRIER_EXTEND, cp->regs + REG_MAC_TX_CFG); val = readl(cp->regs + REG_MAC_RX_CFG); val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */ writel(val | MAC_RX_CFG_CARRIER_EXTEND, cp->regs + REG_MAC_RX_CFG); writel(0x200, cp->regs + REG_MAC_SLOT_TIME); cp->crc_size = 4; /* minimum size gigabit frame at half duplex */ cp->min_frame_size = CAS_1000MB_MIN_FRAME; } else { writel(val, cp->regs + REG_MAC_TX_CFG); /* checksum bug workaround. don't strip FCS when in * half-duplex mode */ val = readl(cp->regs + REG_MAC_RX_CFG); if (full_duplex) { val |= MAC_RX_CFG_STRIP_FCS; cp->crc_size = 0; cp->min_frame_size = CAS_MIN_MTU; } else { val &= ~MAC_RX_CFG_STRIP_FCS; cp->crc_size = 4; cp->min_frame_size = CAS_MIN_FRAME; } writel(val & ~MAC_RX_CFG_CARRIER_EXTEND, cp->regs + REG_MAC_RX_CFG); writel(0x40, cp->regs + REG_MAC_SLOT_TIME); } if (netif_msg_link(cp)) { if (pause & 0x01) { netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n", cp->rx_fifo_size, cp->rx_pause_off, cp->rx_pause_on); } else if (pause & 0x10) { netdev_info(cp->dev, "TX pause enabled\n"); } else { netdev_info(cp->dev, "Pause is disabled\n"); } } val = readl(cp->regs + REG_MAC_CTRL_CFG); val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN); if (pause) { /* symmetric or asymmetric pause */ val |= MAC_CTRL_CFG_SEND_PAUSE_EN; if (pause & 0x01) { /* symmetric pause */ val |= MAC_CTRL_CFG_RECV_PAUSE_EN; } } writel(val, cp->regs + REG_MAC_CTRL_CFG); cas_start_dma(cp); } /* Must be invoked under cp->lock. */ static void cas_init_hw(struct cas *cp, int restart_link) { if (restart_link) cas_phy_init(cp); cas_init_pause_thresholds(cp); cas_init_mac(cp); cas_init_dma(cp); if (restart_link) { /* Default aneg parameters */ cp->timer_ticks = 0; cas_begin_auto_negotiation(cp, NULL); } else if (cp->lstate == link_up) { cas_set_link_modes(cp); netif_carrier_on(cp->dev); } } /* Must be invoked under cp->lock. on earlier cassini boards, * SOFT_0 is tied to PCI reset. we use this to force a pci reset, * let it settle out, and then restore pci state. */ static void cas_hard_reset(struct cas *cp) { writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN); udelay(20); pci_restore_state(cp->pdev); } static void cas_global_reset(struct cas *cp, int blkflag) { int limit; /* issue a global reset. don't use RSTOUT. */ if (blkflag && !CAS_PHY_MII(cp->phy_type)) { /* For PCS, when the blkflag is set, we should set the * SW_REST_BLOCK_PCS_SLINK bit to prevent the results of * the last autonegotiation from being cleared. We'll * need some special handling if the chip is set into a * loopback mode. 
*/ writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK), cp->regs + REG_SW_RESET); } else { writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET); } /* need to wait at least 3ms before polling register */ mdelay(3); limit = STOP_TRIES; while (limit-- > 0) { u32 val = readl(cp->regs + REG_SW_RESET); if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0) goto done; udelay(10); } netdev_err(cp->dev, "sw reset failed\n"); done: /* enable various BIM interrupts */ writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE | BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG); /* clear out pci error status mask for handled errors. * we don't deal with DMA counter overflows as they happen * all the time. */ writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO | PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE | PCI_ERR_BIM_DMA_READ), cp->regs + REG_PCI_ERR_STATUS_MASK); /* set up for MII by default to address mac rx reset timeout * issue */ writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); } static void cas_reset(struct cas *cp, int blkflag) { u32 val; cas_mask_intr(cp); cas_global_reset(cp, blkflag); cas_mac_reset(cp); cas_entropy_reset(cp); /* disable dma engines. */ val = readl(cp->regs + REG_TX_CFG); val &= ~TX_CFG_DMA_EN; writel(val, cp->regs + REG_TX_CFG); val = readl(cp->regs + REG_RX_CFG); val &= ~RX_CFG_DMA_EN; writel(val, cp->regs + REG_RX_CFG); /* program header parser */ if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) || (&CAS_HP_ALT_FIRMWARE[0] == &cas_prog_null[0])) { cas_load_firmware(cp, CAS_HP_FIRMWARE); } else { cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE); } /* clear out error registers */ spin_lock(&cp->stat_lock[N_TX_RINGS]); cas_clear_mac_err(cp); spin_unlock(&cp->stat_lock[N_TX_RINGS]); } /* Shut down the chip, must be called with pm_mutex held. */ static void cas_shutdown(struct cas *cp) { unsigned long flags; /* Make us not-running to avoid timers respawning */ cp->hw_running = 0; del_timer_sync(&cp->link_timer); /* Stop the reset task */ #if 0 while (atomic_read(&cp->reset_task_pending_mtu) || atomic_read(&cp->reset_task_pending_spare) || atomic_read(&cp->reset_task_pending_all)) schedule(); #else while (atomic_read(&cp->reset_task_pending)) schedule(); #endif /* Actually stop the chip */ cas_lock_all_save(cp, flags); cas_reset(cp, 0); if (cp->cas_flags & CAS_FLAG_SATURN) cas_phy_powerdown(cp); cas_unlock_all_restore(cp, flags); } static int cas_change_mtu(struct net_device *dev, int new_mtu) { struct cas *cp = netdev_priv(dev); dev->mtu = new_mtu; if (!netif_running(dev) || !netif_device_present(dev)) return 0; /* let the reset task handle it */ #if 1 atomic_inc(&cp->reset_task_pending); if ((cp->phy_type & CAS_PHY_SERDES)) { atomic_inc(&cp->reset_task_pending_all); } else { atomic_inc(&cp->reset_task_pending_mtu); } schedule_work(&cp->reset_task); #else atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ? CAS_RESET_ALL : CAS_RESET_MTU); pr_err("reset called in cas_change_mtu\n"); schedule_work(&cp->reset_task); #endif flush_work(&cp->reset_task); return 0; } static void cas_clean_txd(struct cas *cp, int ring) { struct cas_tx_desc *txd = cp->init_txds[ring]; struct sk_buff *skb, **skbs = cp->tx_skbs[ring]; u64 daddr, dlen; int i, size; size = TX_DESC_RINGN_SIZE(ring); for (i = 0; i < size; i++) { int frag; if (skbs[i] == NULL) continue; skb = skbs[i]; skbs[i] = NULL; for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { int ent = i & (size - 1); /* first buffer is never a tiny buffer and so * needs to be unmapped. 
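Tiny buffers live in the coherent block allocated by cas_tx_tiny_alloc(), so only the page-mapped buffers need dma_unmap_page() here.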
*/ daddr = le64_to_cpu(txd[ent].buffer); dlen = CAS_VAL(TX_DESC_BUFLEN, le64_to_cpu(txd[ent].control)); dma_unmap_page(&cp->pdev->dev, daddr, dlen, DMA_TO_DEVICE); if (frag != skb_shinfo(skb)->nr_frags) { i++; /* next buffer might by a tiny buffer. * skip past it. */ ent = i & (size - 1); if (cp->tx_tiny_use[ring][ent].used) i++; } } dev_kfree_skb_any(skb); } /* zero out tiny buf usage */ memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring])); } /* freed on close */ static inline void cas_free_rx_desc(struct cas *cp, int ring) { cas_page_t **page = cp->rx_pages[ring]; int i, size; size = RX_DESC_RINGN_SIZE(ring); for (i = 0; i < size; i++) { if (page[i]) { cas_page_free(cp, page[i]); page[i] = NULL; } } } static void cas_free_rxds(struct cas *cp) { int i; for (i = 0; i < N_RX_DESC_RINGS; i++) cas_free_rx_desc(cp, i); } /* Must be invoked under cp->lock. */ static void cas_clean_rings(struct cas *cp) { int i; /* need to clean all tx rings */ memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS); memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS); for (i = 0; i < N_TX_RINGS; i++) cas_clean_txd(cp, i); /* zero out init block */ memset(cp->init_block, 0, sizeof(struct cas_init_block)); cas_clean_rxds(cp); cas_clean_rxcs(cp); } /* allocated on open */ static inline int cas_alloc_rx_desc(struct cas *cp, int ring) { cas_page_t **page = cp->rx_pages[ring]; int size, i = 0; size = RX_DESC_RINGN_SIZE(ring); for (i = 0; i < size; i++) { if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL) return -1; } return 0; } static int cas_alloc_rxds(struct cas *cp) { int i; for (i = 0; i < N_RX_DESC_RINGS; i++) { if (cas_alloc_rx_desc(cp, i) < 0) { cas_free_rxds(cp); return -1; } } return 0; } static void cas_reset_task(struct work_struct *work) { struct cas *cp = container_of(work, struct cas, reset_task); #if 0 int pending = atomic_read(&cp->reset_task_pending); #else int pending_all = atomic_read(&cp->reset_task_pending_all); int pending_spare = atomic_read(&cp->reset_task_pending_spare); int pending_mtu = atomic_read(&cp->reset_task_pending_mtu); if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) { /* We can have more tasks scheduled than actually * needed. */ atomic_dec(&cp->reset_task_pending); return; } #endif /* The link went down, we reset the ring, but keep * DMA stopped. Use this function for reset * on error as well. */ if (cp->hw_running) { unsigned long flags; /* Make sure we don't get interrupts or tx packets */ netif_device_detach(cp->dev); cas_lock_all_save(cp, flags); if (cp->opened) { /* We call cas_spare_recover when we call cas_open. * but we do not initialize the lists cas_spare_recover * uses until cas_open is called. */ cas_spare_recover(cp, GFP_ATOMIC); } #if 1 /* test => only pending_spare set */ if (!pending_all && !pending_mtu) goto done; #else if (pending == CAS_RESET_SPARE) goto done; #endif /* when pending == CAS_RESET_ALL, the following * call to cas_init_hw will restart auto negotiation. * Setting the second argument of cas_reset to * !(pending == CAS_RESET_ALL) will set this argument * to 1 (avoiding reinitializing the PHY for the normal * PCS case) when auto negotiation is not restarted. 
*/ #if 1 cas_reset(cp, !(pending_all > 0)); if (cp->opened) cas_clean_rings(cp); cas_init_hw(cp, (pending_all > 0)); #else cas_reset(cp, !(pending == CAS_RESET_ALL)); if (cp->opened) cas_clean_rings(cp); cas_init_hw(cp, pending == CAS_RESET_ALL); #endif done: cas_unlock_all_restore(cp, flags); netif_device_attach(cp->dev); } #if 1 atomic_sub(pending_all, &cp->reset_task_pending_all); atomic_sub(pending_spare, &cp->reset_task_pending_spare); atomic_sub(pending_mtu, &cp->reset_task_pending_mtu); atomic_dec(&cp->reset_task_pending); #else atomic_set(&cp->reset_task_pending, 0); #endif } static void cas_link_timer(struct timer_list *t) { struct cas *cp = from_timer(cp, t, link_timer); int mask, pending = 0, reset = 0; unsigned long flags; if (link_transition_timeout != 0 && cp->link_transition_jiffies_valid && time_is_before_jiffies(cp->link_transition_jiffies + link_transition_timeout)) { /* One-second counter so link-down workaround doesn't * cause resets to occur so fast as to fool the switch * into thinking the link is down. */ cp->link_transition_jiffies_valid = 0; } if (!cp->hw_running) return; spin_lock_irqsave(&cp->lock, flags); cas_lock_tx(cp); cas_entropy_gather(cp); /* If the link task is still pending, we just * reschedule the link timer */ #if 1 if (atomic_read(&cp->reset_task_pending_all) || atomic_read(&cp->reset_task_pending_spare) || atomic_read(&cp->reset_task_pending_mtu)) goto done; #else if (atomic_read(&cp->reset_task_pending)) goto done; #endif /* check for rx cleaning */ if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) { int i, rmask; for (i = 0; i < MAX_RX_DESC_RINGS; i++) { rmask = CAS_FLAG_RXD_POST(i); if ((mask & rmask) == 0) continue; /* post_rxds will do a mod_timer */ if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) { pending = 1; continue; } cp->cas_flags &= ~rmask; } } if (CAS_PHY_MII(cp->phy_type)) { u16 bmsr; cas_mif_poll(cp, 0); bmsr = cas_phy_read(cp, MII_BMSR); /* WTZ: Solaris driver reads this twice, but that * may be due to the PCS case and the use of a * common implementation. Read it twice here to be * safe. 
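(In standard MII the BMSR link-status bit is latched low, so a first read can return a stale link-down indication; reading twice also clears the latch.)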
*/ bmsr = cas_phy_read(cp, MII_BMSR); cas_mif_poll(cp, 1); readl(cp->regs + REG_MIF_STATUS); /* avoid dups */ reset = cas_mii_link_check(cp, bmsr); } else { reset = cas_pcs_link_check(cp); } if (reset) goto done; /* check for tx state machine confusion */ if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) { u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE); u32 wptr, rptr; int tlm = CAS_VAL(MAC_SM_TLM, val); if (((tlm == 0x5) || (tlm == 0x3)) && (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) { netif_printk(cp, tx_err, KERN_DEBUG, cp->dev, "tx err: MAC_STATE[%08x]\n", val); reset = 1; goto done; } val = readl(cp->regs + REG_TX_FIFO_PKT_CNT); wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR); rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR); if ((val == 0) && (wptr != rptr)) { netif_printk(cp, tx_err, KERN_DEBUG, cp->dev, "tx err: TX_FIFO[%08x:%08x:%08x]\n", val, wptr, rptr); reset = 1; } if (reset) cas_hard_reset(cp); } done: if (reset) { #if 1 atomic_inc(&cp->reset_task_pending); atomic_inc(&cp->reset_task_pending_all); schedule_work(&cp->reset_task); #else atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); pr_err("reset called in cas_link_timer\n"); schedule_work(&cp->reset_task); #endif } if (!pending) mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); cas_unlock_tx(cp); spin_unlock_irqrestore(&cp->lock, flags); } /* tiny buffers are used to avoid target abort issues with * older cassini's */ static void cas_tx_tiny_free(struct cas *cp) { struct pci_dev *pdev = cp->pdev; int i; for (i = 0; i < N_TX_RINGS; i++) { if (!cp->tx_tiny_bufs[i]) continue; dma_free_coherent(&pdev->dev, TX_TINY_BUF_BLOCK, cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]); cp->tx_tiny_bufs[i] = NULL; } } static int cas_tx_tiny_alloc(struct cas *cp) { struct pci_dev *pdev = cp->pdev; int i; for (i = 0; i < N_TX_RINGS; i++) { cp->tx_tiny_bufs[i] = dma_alloc_coherent(&pdev->dev, TX_TINY_BUF_BLOCK, &cp->tx_tiny_dvma[i], GFP_KERNEL); if (!cp->tx_tiny_bufs[i]) { cas_tx_tiny_free(cp); return -1; } } return 0; } static int cas_open(struct net_device *dev) { struct cas *cp = netdev_priv(dev); int hw_was_up, err; unsigned long flags; mutex_lock(&cp->pm_mutex); hw_was_up = cp->hw_running; /* The power-management mutex protects the hw_running * etc. state so it is safe to do this bit without cp->lock */ if (!cp->hw_running) { /* Reset the chip */ cas_lock_all_save(cp, flags); /* We set the second arg to cas_reset to zero * because cas_init_hw below will have its second * argument set to non-zero, which will force * autonegotiation to start. */ cas_reset(cp, 0); cp->hw_running = 1; cas_unlock_all_restore(cp, flags); } err = -ENOMEM; if (cas_tx_tiny_alloc(cp) < 0) goto err_unlock; /* alloc rx descriptors */ if (cas_alloc_rxds(cp) < 0) goto err_tx_tiny; /* allocate spares */ cas_spare_init(cp); cas_spare_recover(cp, GFP_KERNEL); /* We can now request the interrupt as we know it's masked * on the controller. 
cassini+ has up to 4 interrupts * that can be used, but you need to do explicit pci interrupt * mapping to expose them */ if (request_irq(cp->pdev->irq, cas_interrupt, IRQF_SHARED, dev->name, (void *) dev)) { netdev_err(cp->dev, "failed to request irq !\n"); err = -EAGAIN; goto err_spare; } #ifdef USE_NAPI napi_enable(&cp->napi); #endif /* init hw */ cas_lock_all_save(cp, flags); cas_clean_rings(cp); cas_init_hw(cp, !hw_was_up); cp->opened = 1; cas_unlock_all_restore(cp, flags); netif_start_queue(dev); mutex_unlock(&cp->pm_mutex); return 0; err_spare: cas_spare_free(cp); cas_free_rxds(cp); err_tx_tiny: cas_tx_tiny_free(cp); err_unlock: mutex_unlock(&cp->pm_mutex); return err; } static int cas_close(struct net_device *dev) { unsigned long flags; struct cas *cp = netdev_priv(dev); #ifdef USE_NAPI napi_disable(&cp->napi); #endif /* Make sure we don't get distracted by suspend/resume */ mutex_lock(&cp->pm_mutex); netif_stop_queue(dev); /* Stop traffic, mark us closed */ cas_lock_all_save(cp, flags); cp->opened = 0; cas_reset(cp, 0); cas_phy_init(cp); cas_begin_auto_negotiation(cp, NULL); cas_clean_rings(cp); cas_unlock_all_restore(cp, flags); free_irq(cp->pdev->irq, (void *) dev); cas_spare_free(cp); cas_free_rxds(cp); cas_tx_tiny_free(cp); mutex_unlock(&cp->pm_mutex); return 0; } static struct { const char name[ETH_GSTRING_LEN]; } ethtool_cassini_statnames[] = { {"collisions"}, {"rx_bytes"}, {"rx_crc_errors"}, {"rx_dropped"}, {"rx_errors"}, {"rx_fifo_errors"}, {"rx_frame_errors"}, {"rx_length_errors"}, {"rx_over_errors"}, {"rx_packets"}, {"tx_aborted_errors"}, {"tx_bytes"}, {"tx_dropped"}, {"tx_errors"}, {"tx_fifo_errors"}, {"tx_packets"} }; #define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames) static struct { const int offsets; /* neg. values for 2nd arg to cas_read_phy */ } ethtool_register_table[] = { {-MII_BMSR}, {-MII_BMCR}, {REG_CAWR}, {REG_INF_BURST}, {REG_BIM_CFG}, {REG_RX_CFG}, {REG_HP_CFG}, {REG_MAC_TX_CFG}, {REG_MAC_RX_CFG}, {REG_MAC_CTRL_CFG}, {REG_MAC_XIF_CFG}, {REG_MIF_CFG}, {REG_PCS_CFG}, {REG_SATURN_PCFG}, {REG_PCS_MII_STATUS}, {REG_PCS_STATE_MACHINE}, {REG_MAC_COLL_EXCESS}, {REG_MAC_COLL_LATE} }; #define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table) #define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN) static void cas_read_regs(struct cas *cp, u8 *ptr, int len) { u8 *p; int i; unsigned long flags; spin_lock_irqsave(&cp->lock, flags); for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) { u16 hval; u32 val; if (ethtool_register_table[i].offsets < 0) { hval = cas_phy_read(cp, -ethtool_register_table[i].offsets); val = hval; } else { val= readl(cp->regs+ethtool_register_table[i].offsets); } memcpy(p, (u8 *)&val, sizeof(u32)); } spin_unlock_irqrestore(&cp->lock, flags); } static struct net_device_stats *cas_get_stats(struct net_device *dev) { struct cas *cp = netdev_priv(dev); struct net_device_stats *stats = cp->net_stats; unsigned long flags; int i; unsigned long tmp; /* we collate all of the stats into net_stats[N_TX_RING] */ if (!cp->hw_running) return stats + N_TX_RINGS; /* collect outstanding stats */ /* WTZ: the Cassini spec gives these as 16 bit counters but * stored in 32-bit words. Added a mask of 0xffff to be safe, * in case the chip somehow puts any garbage in the other bits. * Also, counter usage didn't seem to mach what Adrian did * in the parts of the code that set these quantities. Made * that consistent. 
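 * Concretely, each MAC MIB register read below is masked to 16 bits (& 0xffff) and accumulated into the stats[N_TX_RINGS] slot, after which cas_clear_mac_err() zeroes the hardware counters.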
*/ spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags); stats[N_TX_RINGS].rx_crc_errors += readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff; stats[N_TX_RINGS].rx_frame_errors += readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff; stats[N_TX_RINGS].rx_length_errors += readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff; #if 1 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) + (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff); stats[N_TX_RINGS].tx_aborted_errors += tmp; stats[N_TX_RINGS].collisions += tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff); #else stats[N_TX_RINGS].tx_aborted_errors += readl(cp->regs + REG_MAC_COLL_EXCESS); stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) + readl(cp->regs + REG_MAC_COLL_LATE); #endif cas_clear_mac_err(cp); /* saved bits that are unique to ring 0 */ spin_lock(&cp->stat_lock[0]); stats[N_TX_RINGS].collisions += stats[0].collisions; stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors; stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors; stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors; stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors; stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors; spin_unlock(&cp->stat_lock[0]); for (i = 0; i < N_TX_RINGS; i++) { spin_lock(&cp->stat_lock[i]); stats[N_TX_RINGS].rx_length_errors += stats[i].rx_length_errors; stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors; stats[N_TX_RINGS].rx_packets += stats[i].rx_packets; stats[N_TX_RINGS].tx_packets += stats[i].tx_packets; stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes; stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes; stats[N_TX_RINGS].rx_errors += stats[i].rx_errors; stats[N_TX_RINGS].tx_errors += stats[i].tx_errors; stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped; stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped; memset(stats + i, 0, sizeof(struct net_device_stats)); spin_unlock(&cp->stat_lock[i]); } spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags); return stats + N_TX_RINGS; } static void cas_set_multicast(struct net_device *dev) { struct cas *cp = netdev_priv(dev); u32 rxcfg, rxcfg_new; unsigned long flags; int limit = STOP_TRIES; if (!cp->hw_running) return; spin_lock_irqsave(&cp->lock, flags); rxcfg = readl(cp->regs + REG_MAC_RX_CFG); /* disable RX MAC and wait for completion */ writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) { if (!limit--) break; udelay(10); } /* disable hash filter and wait for completion */ limit = STOP_TRIES; rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN); writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) { if (!limit--) break; udelay(10); } /* program hash filters */ cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp); rxcfg |= rxcfg_new; writel(rxcfg, cp->regs + REG_MAC_RX_CFG); spin_unlock_irqrestore(&cp->lock, flags); } static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct cas *cp = netdev_priv(dev); strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); strscpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); } static int cas_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct cas *cp = netdev_priv(dev); u16 bmcr; int full_duplex, speed, pause; unsigned long flags; enum link_state linkstate = link_up; u32 supported, advertising; advertising = 
0; supported = SUPPORTED_Autoneg; if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { supported |= SUPPORTED_1000baseT_Full; advertising |= ADVERTISED_1000baseT_Full; } /* Record PHY settings if HW is on. */ spin_lock_irqsave(&cp->lock, flags); bmcr = 0; linkstate = cp->lstate; if (CAS_PHY_MII(cp->phy_type)) { cmd->base.port = PORT_MII; cmd->base.phy_address = cp->phy_addr; advertising |= ADVERTISED_TP | ADVERTISED_MII | ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; supported |= (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_TP | SUPPORTED_MII); if (cp->hw_running) { cas_mif_poll(cp, 0); bmcr = cas_phy_read(cp, MII_BMCR); cas_read_mii_link_mode(cp, &full_duplex, &speed, &pause); cas_mif_poll(cp, 1); } } else { cmd->base.port = PORT_FIBRE; cmd->base.phy_address = 0; supported |= SUPPORTED_FIBRE; advertising |= ADVERTISED_FIBRE; if (cp->hw_running) { /* pcs uses the same bits as mii */ bmcr = readl(cp->regs + REG_PCS_MII_CTRL); cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause); } } spin_unlock_irqrestore(&cp->lock, flags); if (bmcr & BMCR_ANENABLE) { advertising |= ADVERTISED_Autoneg; cmd->base.autoneg = AUTONEG_ENABLE; cmd->base.speed = ((speed == 10) ? SPEED_10 : ((speed == 1000) ? SPEED_1000 : SPEED_100)); cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF; } else { cmd->base.autoneg = AUTONEG_DISABLE; cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ? SPEED_1000 : ((bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10)); cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; } if (linkstate != link_up) { /* Force these to "unknown" if the link is not up and * autonogotiation in enabled. We can set the link * speed to 0, but not cmd->duplex, * because its legal values are 0 and 1. Ethtool will * print the value reported in parentheses after the * word "Unknown" for unrecognized values. * * If in forced mode, we report the speed and duplex * settings that we configured. */ if (cp->link_cntl & BMCR_ANENABLE) { cmd->base.speed = 0; cmd->base.duplex = 0xff; } else { cmd->base.speed = SPEED_10; if (cp->link_cntl & BMCR_SPEED100) { cmd->base.speed = SPEED_100; } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { cmd->base.speed = SPEED_1000; } cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; } } ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); return 0; } static int cas_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct cas *cp = netdev_priv(dev); unsigned long flags; u32 speed = cmd->base.speed; /* Verify the settings we care about. */ if (cmd->base.autoneg != AUTONEG_ENABLE && cmd->base.autoneg != AUTONEG_DISABLE) return -EINVAL; if (cmd->base.autoneg == AUTONEG_DISABLE && ((speed != SPEED_1000 && speed != SPEED_100 && speed != SPEED_10) || (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL))) return -EINVAL; /* Apply settings and restart link process. */ spin_lock_irqsave(&cp->lock, flags); cas_begin_auto_negotiation(cp, cmd); spin_unlock_irqrestore(&cp->lock, flags); return 0; } static int cas_nway_reset(struct net_device *dev) { struct cas *cp = netdev_priv(dev); unsigned long flags; if ((cp->link_cntl & BMCR_ANENABLE) == 0) return -EINVAL; /* Restart link process. 
*/ spin_lock_irqsave(&cp->lock, flags); cas_begin_auto_negotiation(cp, NULL); spin_unlock_irqrestore(&cp->lock, flags); return 0; } static u32 cas_get_link(struct net_device *dev) { struct cas *cp = netdev_priv(dev); return cp->lstate == link_up; } static u32 cas_get_msglevel(struct net_device *dev) { struct cas *cp = netdev_priv(dev); return cp->msg_enable; } static void cas_set_msglevel(struct net_device *dev, u32 value) { struct cas *cp = netdev_priv(dev); cp->msg_enable = value; } static int cas_get_regs_len(struct net_device *dev) { struct cas *cp = netdev_priv(dev); return min_t(int, cp->casreg_len, CAS_MAX_REGS); } static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { struct cas *cp = netdev_priv(dev); regs->version = 0; /* cas_read_regs handles locks (cp->lock). */ cas_read_regs(cp, p, regs->len / sizeof(u32)); } static int cas_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return CAS_NUM_STAT_KEYS; default: return -EOPNOTSUPP; } } static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data) { memcpy(data, &ethtool_cassini_statnames, CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN); } static void cas_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *data) { struct cas *cp = netdev_priv(dev); struct net_device_stats *stats = cas_get_stats(cp->dev); int i = 0; data[i++] = stats->collisions; data[i++] = stats->rx_bytes; data[i++] = stats->rx_crc_errors; data[i++] = stats->rx_dropped; data[i++] = stats->rx_errors; data[i++] = stats->rx_fifo_errors; data[i++] = stats->rx_frame_errors; data[i++] = stats->rx_length_errors; data[i++] = stats->rx_over_errors; data[i++] = stats->rx_packets; data[i++] = stats->tx_aborted_errors; data[i++] = stats->tx_bytes; data[i++] = stats->tx_dropped; data[i++] = stats->tx_errors; data[i++] = stats->tx_fifo_errors; data[i++] = stats->tx_packets; BUG_ON(i != CAS_NUM_STAT_KEYS); } static const struct ethtool_ops cas_ethtool_ops = { .get_drvinfo = cas_get_drvinfo, .nway_reset = cas_nway_reset, .get_link = cas_get_link, .get_msglevel = cas_get_msglevel, .set_msglevel = cas_set_msglevel, .get_regs_len = cas_get_regs_len, .get_regs = cas_get_regs, .get_sset_count = cas_get_sset_count, .get_strings = cas_get_strings, .get_ethtool_stats = cas_get_ethtool_stats, .get_link_ksettings = cas_get_link_ksettings, .set_link_ksettings = cas_set_link_ksettings, }; static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct cas *cp = netdev_priv(dev); struct mii_ioctl_data *data = if_mii(ifr); unsigned long flags; int rc = -EOPNOTSUPP; /* Hold the PM mutex while doing ioctl's or we may collide * with open/close and power management and oops. */ mutex_lock(&cp->pm_mutex); switch (cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = cp->phy_addr; fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ spin_lock_irqsave(&cp->lock, flags); cas_mif_poll(cp, 0); data->val_out = cas_phy_read(cp, data->reg_num & 0x1f); cas_mif_poll(cp, 1); spin_unlock_irqrestore(&cp->lock, flags); rc = 0; break; case SIOCSMIIREG: /* Write MII PHY register. 
*/ spin_lock_irqsave(&cp->lock, flags); cas_mif_poll(cp, 0); rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in); cas_mif_poll(cp, 1); spin_unlock_irqrestore(&cp->lock, flags); break; default: break; } mutex_unlock(&cp->pm_mutex); return rc; } /* When this chip sits underneath an Intel 31154 bridge, it is the * only subordinate device and we can tweak the bridge settings to * reflect that fact. */ static void cas_program_bridge(struct pci_dev *cas_pdev) { struct pci_dev *pdev = cas_pdev->bus->self; u32 val; if (!pdev) return; if (pdev->vendor != 0x8086 || pdev->device != 0x537c) return; /* Clear bit 10 (Bus Parking Control) in the Secondary * Arbiter Control/Status Register which lives at offset * 0x41. Using a 32-bit word read/modify/write at 0x40 * is much simpler so that's how we do this. */ pci_read_config_dword(pdev, 0x40, &val); val &= ~0x00040000; pci_write_config_dword(pdev, 0x40, val); /* Max out the Multi-Transaction Timer settings since * Cassini is the only device present. * * The register is 16-bit and lives at 0x50. When the * settings are enabled, it extends the GRANT# signal * for a requestor after a transaction is complete. This * allows the next request to run without first needing * to negotiate the GRANT# signal back. * * Bits 12:10 define the grant duration: * * 1 -- 16 clocks * 2 -- 32 clocks * 3 -- 64 clocks * 4 -- 128 clocks * 5 -- 256 clocks * * All other values are illegal. * * Bits 09:00 define which REQ/GNT signal pairs get the * GRANT# signal treatment. We set them all. */ pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff); /* The Read Prefecth Policy register is 16-bit and sits at * offset 0x52. It enables a "smart" pre-fetch policy. We * enable it and max out all of the settings since only one * device is sitting underneath and thus bandwidth sharing is * not an issue. * * The register has several 3 bit fields, which indicates a * multiplier applied to the base amount of prefetching the * chip would do. These fields are at: * * 15:13 --- ReRead Primary Bus * 12:10 --- FirstRead Primary Bus * 09:07 --- ReRead Secondary Bus * 06:04 --- FirstRead Secondary Bus * * Bits 03:00 control which REQ/GNT pairs the prefetch settings * get enabled on. Bit 3 is a grouped enabler which controls * all of the REQ/GNT pairs from [8:3]. Bits 2 to 0 control * the individual REQ/GNT pairs [2:0]. */ pci_write_config_word(pdev, 0x52, (0x7 << 13) | (0x7 << 10) | (0x7 << 7) | (0x7 << 4) | (0xf << 0)); /* Force cacheline size to 0x8 */ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08); /* Force latency timer to maximum setting so Cassini can * sit on the bus as long as it likes. 
*/ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff); } static const struct net_device_ops cas_netdev_ops = { .ndo_open = cas_open, .ndo_stop = cas_close, .ndo_start_xmit = cas_start_xmit, .ndo_get_stats = cas_get_stats, .ndo_set_rx_mode = cas_set_multicast, .ndo_eth_ioctl = cas_ioctl, .ndo_tx_timeout = cas_tx_timeout, .ndo_change_mtu = cas_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cas_netpoll, #endif }; static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int cas_version_printed = 0; unsigned long casreg_len; struct net_device *dev; struct cas *cp; u16 pci_cmd; int i, err; u8 orig_cacheline_size = 0, cas_cacheline_size = 0; if (cas_version_printed++ == 0) pr_info("%s", version); err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); return err; } if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "Cannot find proper PCI device " "base address, aborting\n"); err = -ENODEV; goto err_out_disable_pdev; } dev = alloc_etherdev(sizeof(*cp)); if (!dev) { err = -ENOMEM; goto err_out_disable_pdev; } SET_NETDEV_DEV(dev, &pdev->dev); err = pci_request_regions(pdev, dev->name); if (err) { dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); goto err_out_free_netdev; } pci_set_master(pdev); /* we must always turn on parity response or else parity * doesn't get generated properly. disable SERR/PERR as well. * in addition, we want to turn MWI on. */ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); pci_cmd &= ~PCI_COMMAND_SERR; pci_cmd |= PCI_COMMAND_PARITY; pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); if (pci_try_set_mwi(pdev)) pr_warn("Could not enable MWI for %s\n", pci_name(pdev)); cas_program_bridge(pdev); /* * On some architectures, the default cache line size set * by pci_try_set_mwi reduces perforamnce. We have to increase * it for this case. To start, we'll print some configuration * data. */ #if 1 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &orig_cacheline_size); if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) { cas_cacheline_size = (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ? CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES; if (pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, cas_cacheline_size)) { dev_err(&pdev->dev, "Could not set PCI cache " "line size\n"); goto err_out_free_res; } } #endif /* Configure DMA attributes. */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_out_free_res; } casreg_len = pci_resource_len(pdev, 0); cp = netdev_priv(dev); cp->pdev = pdev; #if 1 /* A value of 0 indicates we never explicitly set it */ cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0; #endif cp->dev = dev; cp->msg_enable = (cassini_debug < 0) ? 
CAS_DEF_MSG_ENABLE : cassini_debug; #if defined(CONFIG_SPARC) cp->of_node = pci_device_to_OF_node(pdev); #endif cp->link_transition = LINK_TRANSITION_UNKNOWN; cp->link_transition_jiffies_valid = 0; spin_lock_init(&cp->lock); spin_lock_init(&cp->rx_inuse_lock); spin_lock_init(&cp->rx_spare_lock); for (i = 0; i < N_TX_RINGS; i++) { spin_lock_init(&cp->stat_lock[i]); spin_lock_init(&cp->tx_lock[i]); } spin_lock_init(&cp->stat_lock[N_TX_RINGS]); mutex_init(&cp->pm_mutex); timer_setup(&cp->link_timer, cas_link_timer, 0); #if 1 /* Just in case the implementation of atomic operations * change so that an explicit initialization is necessary. */ atomic_set(&cp->reset_task_pending, 0); atomic_set(&cp->reset_task_pending_all, 0); atomic_set(&cp->reset_task_pending_spare, 0); atomic_set(&cp->reset_task_pending_mtu, 0); #endif INIT_WORK(&cp->reset_task, cas_reset_task); /* Default link parameters */ if (link_mode >= 0 && link_mode < 6) cp->link_cntl = link_modes[link_mode]; else cp->link_cntl = BMCR_ANENABLE; cp->lstate = link_down; cp->link_transition = LINK_TRANSITION_LINK_DOWN; netif_carrier_off(cp->dev); cp->timer_ticks = 0; /* give us access to cassini registers */ cp->regs = pci_iomap(pdev, 0, casreg_len); if (!cp->regs) { dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); goto err_out_free_res; } cp->casreg_len = casreg_len; pci_save_state(pdev); cas_check_pci_invariants(cp); cas_hard_reset(cp); cas_reset(cp, 0); if (cas_check_invariants(cp)) goto err_out_iounmap; if (cp->cas_flags & CAS_FLAG_SATURN) cas_saturn_firmware_init(cp); cp->init_block = dma_alloc_coherent(&pdev->dev, sizeof(struct cas_init_block), &cp->block_dvma, GFP_KERNEL); if (!cp->init_block) { dev_err(&pdev->dev, "Cannot allocate init block, aborting\n"); goto err_out_iounmap; } for (i = 0; i < N_TX_RINGS; i++) cp->init_txds[i] = cp->init_block->txds[i]; for (i = 0; i < N_RX_DESC_RINGS; i++) cp->init_rxds[i] = cp->init_block->rxds[i]; for (i = 0; i < N_RX_COMP_RINGS; i++) cp->init_rxcs[i] = cp->init_block->rxcs[i]; for (i = 0; i < N_RX_FLOWS; i++) skb_queue_head_init(&cp->rx_flows[i]); dev->netdev_ops = &cas_netdev_ops; dev->ethtool_ops = &cas_ethtool_ops; dev->watchdog_timeo = CAS_TX_TIMEOUT; #ifdef USE_NAPI netif_napi_add(dev, &cp->napi, cas_poll); #endif dev->irq = pdev->irq; dev->dma = 0; /* Cassini features. */ if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0) dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; dev->features |= NETIF_F_HIGHDMA; /* MTU range: 60 - varies or 9000 */ dev->min_mtu = CAS_MIN_MTU; dev->max_mtu = CAS_MAX_MTU; if (register_netdev(dev)) { dev_err(&pdev->dev, "Cannot register net device, aborting\n"); goto err_out_free_consistent; } i = readl(cp->regs + REG_BIM_CFG); netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n", (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", (i & BIM_CFG_32BIT) ? "32" : "64", (i & BIM_CFG_66MHZ) ? "66" : "33", (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq, dev->dev_addr); pci_set_drvdata(pdev, dev); cp->hw_running = 1; cas_entropy_reset(cp); cas_phy_init(cp); cas_begin_auto_negotiation(cp, NULL); return 0; err_out_free_consistent: dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block), cp->init_block, cp->block_dvma); err_out_iounmap: mutex_lock(&cp->pm_mutex); if (cp->hw_running) cas_shutdown(cp); mutex_unlock(&cp->pm_mutex); vfree(cp->fw_data); pci_iounmap(pdev, cp->regs); err_out_free_res: pci_release_regions(pdev); /* Try to restore it in case the error occurred after we * set it. 
*/ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size); err_out_free_netdev: free_netdev(dev); err_out_disable_pdev: pci_disable_device(pdev); return -ENODEV; } static void cas_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct cas *cp; if (!dev) return; cp = netdev_priv(dev); unregister_netdev(dev); vfree(cp->fw_data); mutex_lock(&cp->pm_mutex); cancel_work_sync(&cp->reset_task); if (cp->hw_running) cas_shutdown(cp); mutex_unlock(&cp->pm_mutex); #if 1 if (cp->orig_cacheline_size) { /* Restore the cache line size if we had modified * it. */ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, cp->orig_cacheline_size); } #endif dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block), cp->init_block, cp->block_dvma); pci_iounmap(pdev, cp->regs); free_netdev(dev); pci_release_regions(pdev); pci_disable_device(pdev); } static int __maybe_unused cas_suspend(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); struct cas *cp = netdev_priv(dev); unsigned long flags; mutex_lock(&cp->pm_mutex); /* If the driver is opened, we stop the DMA */ if (cp->opened) { netif_device_detach(dev); cas_lock_all_save(cp, flags); /* We can set the second arg of cas_reset to 0 * because on resume, we'll call cas_init_hw with * its second arg set so that autonegotiation is * restarted. */ cas_reset(cp, 0); cas_clean_rings(cp); cas_unlock_all_restore(cp, flags); } if (cp->hw_running) cas_shutdown(cp); mutex_unlock(&cp->pm_mutex); return 0; } static int __maybe_unused cas_resume(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); struct cas *cp = netdev_priv(dev); netdev_info(dev, "resuming\n"); mutex_lock(&cp->pm_mutex); cas_hard_reset(cp); if (cp->opened) { unsigned long flags; cas_lock_all_save(cp, flags); cas_reset(cp, 0); cp->hw_running = 1; cas_clean_rings(cp); cas_init_hw(cp, 1); cas_unlock_all_restore(cp, flags); netif_device_attach(dev); } mutex_unlock(&cp->pm_mutex); return 0; } static SIMPLE_DEV_PM_OPS(cas_pm_ops, cas_suspend, cas_resume); static struct pci_driver cas_driver = { .name = DRV_MODULE_NAME, .id_table = cas_pci_tbl, .probe = cas_init_one, .remove = cas_remove_one, .driver.pm = &cas_pm_ops, }; static int __init cas_init(void) { if (linkdown_timeout > 0) link_transition_timeout = linkdown_timeout * HZ; else link_transition_timeout = 0; return pci_register_driver(&cas_driver); } static void __exit cas_cleanup(void) { pci_unregister_driver(&cas_driver); } module_init(cas_init); module_exit(cas_cleanup);
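/*
 * Illustrative sketch, not part of the original driver: the magic constant
 * that cas_program_bridge() writes to the Intel 31154 Multi-Transaction
 * Timer register at offset 0x50 decomposes into the bit fields described
 * in that function's comments.  The helper name below is hypothetical and
 * exists only to make the encoding explicit.
 */
static inline u16 cas_31154_mtt_value(unsigned int grant_duration_code,
                                      unsigned int reqgnt_pair_mask)
{
        /* Bits 12:10 select how long GRANT# is extended after a
         * transaction completes (5 => 256 clocks); bits 09:00 select
         * which REQ/GNT pairs receive that treatment.
         */
        return (u16)((grant_duration_code << 10) | (reqgnt_pair_mask & 0x3ff));
}

/* cas_program_bridge() effectively writes cas_31154_mtt_value(5, 0x3ff),
 * i.e. 0x17ff, maxing out both fields since Cassini is the only device
 * sitting behind the bridge.
 */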
linux-master
drivers/net/ethernet/sun/cassini.c
// SPDX-License-Identifier: GPL-2.0 /* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching, * auto carrier detecting ethernet driver. Also known as the * "Happy Meal Ethernet" found on SunSwift SBUS cards. * * Copyright (C) 1996, 1998, 1999, 2002, 2003, * 2006, 2008 David S. Miller ([email protected]) * * Changes : * 2000/11/11 Willy Tarreau <willy AT meta-x.org> * - port to non-sparc architectures. Tested only on x86 and * only currently works with QFE PCI cards. * - ability to specify the MAC address at module load time by passing this * argument : macaddr=0x00,0x10,0x20,0x30,0x40,0x50 */ #include <linux/bitops.h> #include <linux/crc32.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/fcntl.h> #include <linux/in.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/mii.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/random.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/types.h> #include <linux/uaccess.h> #include <asm/byteorder.h> #include <asm/dma.h> #include <asm/irq.h> #ifdef CONFIG_SPARC #include <asm/auxio.h> #include <asm/idprom.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/prom.h> #endif #include "sunhme.h" #define DRV_NAME "sunhme" MODULE_AUTHOR("David S. Miller ([email protected])"); MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver"); MODULE_LICENSE("GPL"); static int macaddr[6]; /* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */ module_param_array(macaddr, int, NULL, 0); MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set"); #ifdef CONFIG_SBUS static struct quattro *qfe_sbus_list; #endif #ifdef CONFIG_PCI static struct quattro *qfe_pci_list; #endif #define hme_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__) #define HMD hme_debug /* "Auto Switch Debug" aka phy debug */ #if 1 #define ASD hme_debug #else #define ASD(...) 
#endif #if 0 struct hme_tx_logent { unsigned int tstamp; int tx_new, tx_old; unsigned int action; #define TXLOG_ACTION_IRQ 0x01 #define TXLOG_ACTION_TXMIT 0x02 #define TXLOG_ACTION_TBUSY 0x04 #define TXLOG_ACTION_NBUFS 0x08 unsigned int status; }; #define TX_LOG_LEN 128 static struct hme_tx_logent tx_log[TX_LOG_LEN]; static int txlog_cur_entry; static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s) { struct hme_tx_logent *tlp; unsigned long flags; local_irq_save(flags); tlp = &tx_log[txlog_cur_entry]; tlp->tstamp = (unsigned int)jiffies; tlp->tx_new = hp->tx_new; tlp->tx_old = hp->tx_old; tlp->action = a; tlp->status = s; txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1); local_irq_restore(flags); } static __inline__ void tx_dump_log(void) { int i, this; this = txlog_cur_entry; for (i = 0; i < TX_LOG_LEN; i++) { pr_err("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i, tx_log[this].tstamp, tx_log[this].tx_new, tx_log[this].tx_old, tx_log[this].action, tx_log[this].status); this = (this + 1) & (TX_LOG_LEN - 1); } } #else #define tx_add_log(hp, a, s) #define tx_dump_log() #endif #define DEFAULT_IPG0 16 /* For lance-mode only */ #define DEFAULT_IPG1 8 /* For all modes */ #define DEFAULT_IPG2 4 /* For all modes */ #define DEFAULT_JAMSIZE 4 /* Toe jam */ /* NOTE: In the descriptor writes one _must_ write the address * member _first_. The card must not be allowed to see * the updated descriptor flags until the address is * correct. I've added a write memory barrier between * the two stores so that I can sleep well at night... -DaveM */ #if defined(CONFIG_SBUS) && defined(CONFIG_PCI) static void sbus_hme_write32(void __iomem *reg, u32 val) { sbus_writel(val, reg); } static u32 sbus_hme_read32(void __iomem *reg) { return sbus_readl(reg); } static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr) { rxd->rx_addr = (__force hme32)addr; dma_wmb(); rxd->rx_flags = (__force hme32)flags; } static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr) { txd->tx_addr = (__force hme32)addr; dma_wmb(); txd->tx_flags = (__force hme32)flags; } static u32 sbus_hme_read_desc32(hme32 *p) { return (__force u32)*p; } static void pci_hme_write32(void __iomem *reg, u32 val) { writel(val, reg); } static u32 pci_hme_read32(void __iomem *reg) { return readl(reg); } static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr) { rxd->rx_addr = (__force hme32)cpu_to_le32(addr); dma_wmb(); rxd->rx_flags = (__force hme32)cpu_to_le32(flags); } static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr) { txd->tx_addr = (__force hme32)cpu_to_le32(addr); dma_wmb(); txd->tx_flags = (__force hme32)cpu_to_le32(flags); } static u32 pci_hme_read_desc32(hme32 *p) { return le32_to_cpup((__le32 *)p); } #define hme_write32(__hp, __reg, __val) \ ((__hp)->write32((__reg), (__val))) #define hme_read32(__hp, __reg) \ ((__hp)->read32(__reg)) #define hme_write_rxd(__hp, __rxd, __flags, __addr) \ ((__hp)->write_rxd((__rxd), (__flags), (__addr))) #define hme_write_txd(__hp, __txd, __flags, __addr) \ ((__hp)->write_txd((__txd), (__flags), (__addr))) #define hme_read_desc32(__hp, __p) \ ((__hp)->read_desc32(__p)) #else #ifdef CONFIG_SBUS /* SBUS only compilation */ #define hme_write32(__hp, __reg, __val) \ sbus_writel((__val), (__reg)) #define hme_read32(__hp, __reg) \ sbus_readl(__reg) #define hme_write_rxd(__hp, __rxd, __flags, __addr) \ do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \ dma_wmb(); \ 
(__rxd)->rx_flags = (__force hme32)(u32)(__flags); \ } while(0) #define hme_write_txd(__hp, __txd, __flags, __addr) \ do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \ dma_wmb(); \ (__txd)->tx_flags = (__force hme32)(u32)(__flags); \ } while(0) #define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p)) #else /* PCI only compilation */ #define hme_write32(__hp, __reg, __val) \ writel((__val), (__reg)) #define hme_read32(__hp, __reg) \ readl(__reg) #define hme_write_rxd(__hp, __rxd, __flags, __addr) \ do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \ dma_wmb(); \ (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \ } while(0) #define hme_write_txd(__hp, __txd, __flags, __addr) \ do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \ dma_wmb(); \ (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \ } while(0) static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p) { return le32_to_cpup((__le32 *)p); } #endif #endif /* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */ static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit) { hme_write32(hp, tregs + TCVR_BBDATA, bit); hme_write32(hp, tregs + TCVR_BBCLOCK, 0); hme_write32(hp, tregs + TCVR_BBCLOCK, 1); } #if 0 static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal) { u32 ret; hme_write32(hp, tregs + TCVR_BBCLOCK, 0); hme_write32(hp, tregs + TCVR_BBCLOCK, 1); ret = hme_read32(hp, tregs + TCVR_CFG); if (internal) ret &= TCV_CFG_MDIO0; else ret &= TCV_CFG_MDIO1; return ret; } #endif static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal) { u32 retval; hme_write32(hp, tregs + TCVR_BBCLOCK, 0); udelay(1); retval = hme_read32(hp, tregs + TCVR_CFG); if (internal) retval &= TCV_CFG_MDIO0; else retval &= TCV_CFG_MDIO1; hme_write32(hp, tregs + TCVR_BBCLOCK, 1); return retval; } #define TCVR_FAILURE 0x80000000 /* Impossible MIF read value */ static int happy_meal_bb_read(struct happy_meal *hp, void __iomem *tregs, int reg) { u32 tmp; int retval = 0; int i; /* Enable the MIF BitBang outputs. */ hme_write32(hp, tregs + TCVR_BBOENAB, 1); /* Force BitBang into the idle state. */ for (i = 0; i < 32; i++) BB_PUT_BIT(hp, tregs, 1); /* Give it the read sequence. */ BB_PUT_BIT(hp, tregs, 0); BB_PUT_BIT(hp, tregs, 1); BB_PUT_BIT(hp, tregs, 1); BB_PUT_BIT(hp, tregs, 0); /* Give it the PHY address. */ tmp = hp->paddr & 0xff; for (i = 4; i >= 0; i--) BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); /* Tell it what register we want to read. */ tmp = (reg & 0xff); for (i = 4; i >= 0; i--) BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); /* Close down the MIF BitBang outputs. */ hme_write32(hp, tregs + TCVR_BBOENAB, 0); /* Now read in the value. */ (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); for (i = 15; i >= 0; i--) retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); ASD("reg=%d value=%x\n", reg, retval); return retval; } static void happy_meal_bb_write(struct happy_meal *hp, void __iomem *tregs, int reg, unsigned short value) { u32 tmp; int i; ASD("reg=%d value=%x\n", reg, value); /* Enable the MIF BitBang outputs. */ hme_write32(hp, tregs + TCVR_BBOENAB, 1); /* Force BitBang into the idle state. */ for (i = 0; i < 32; i++) BB_PUT_BIT(hp, tregs, 1); /* Give it write sequence. 
*/ BB_PUT_BIT(hp, tregs, 0); BB_PUT_BIT(hp, tregs, 1); BB_PUT_BIT(hp, tregs, 0); BB_PUT_BIT(hp, tregs, 1); /* Give it the PHY address. */ tmp = (hp->paddr & 0xff); for (i = 4; i >= 0; i--) BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); /* Tell it what register we will be writing. */ tmp = (reg & 0xff); for (i = 4; i >= 0; i--) BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); /* Tell it to become ready for the bits. */ BB_PUT_BIT(hp, tregs, 1); BB_PUT_BIT(hp, tregs, 0); for (i = 15; i >= 0; i--) BB_PUT_BIT(hp, tregs, ((value >> i) & 1)); /* Close down the MIF BitBang outputs. */ hme_write32(hp, tregs + TCVR_BBOENAB, 0); } #define TCVR_READ_TRIES 16 static int happy_meal_tcvr_read(struct happy_meal *hp, void __iomem *tregs, int reg) { int tries = TCVR_READ_TRIES; int retval; if (hp->tcvr_type == none) { ASD("no transceiver, value=TCVR_FAILURE\n"); return TCVR_FAILURE; } if (!(hp->happy_flags & HFLAG_FENABLE)) { ASD("doing bit bang\n"); return happy_meal_bb_read(hp, tregs, reg); } hme_write32(hp, tregs + TCVR_FRAME, (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18))); while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries) udelay(20); if (!tries) { netdev_err(hp->dev, "Aieee, transceiver MIF read bolixed\n"); return TCVR_FAILURE; } retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff; ASD("reg=0x%02x value=%04x\n", reg, retval); return retval; } #define TCVR_WRITE_TRIES 16 static void happy_meal_tcvr_write(struct happy_meal *hp, void __iomem *tregs, int reg, unsigned short value) { int tries = TCVR_WRITE_TRIES; ASD("reg=0x%02x value=%04x\n", reg, value); /* Welcome to Sun Microsystems, can I take your order please? */ if (!(hp->happy_flags & HFLAG_FENABLE)) { happy_meal_bb_write(hp, tregs, reg, value); return; } /* Would you like fries with that? */ hme_write32(hp, tregs + TCVR_FRAME, (FRAME_WRITE | (hp->paddr << 23) | ((reg & 0xff) << 18) | (value & 0xffff))); while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries) udelay(20); /* Anything else? */ if (!tries) netdev_err(hp->dev, "Aieee, transceiver MIF write bolixed\n"); /* Fifty-two cents is your change, have a nice day. */ } /* Auto negotiation. The scheme is very simple. We have a timer routine * that keeps watching the auto negotiation process as it progresses. * The DP83840 is first told to start doing it's thing, we set up the time * and place the timer state machine in it's initial state. * * Here the timer peeks at the DP83840 status registers at each click to see * if the auto negotiation has completed, we assume here that the DP83840 PHY * will time out at some point and just tell us what (didn't) happen. For * complete coverage we only allow so many of the ticks at this level to run, * when this has expired we print a warning message and try another strategy. * This "other" strategy is to force the interface into various speed/duplex * configurations and we stop when we see a link-up condition before the * maximum number of "peek" ticks have occurred. * * Once a valid link status has been detected we configure the BigMAC and * the rest of the Happy Meal to speak the most efficient protocol we could * get a clean link for. The priority for link configurations, highest first * is: * 100 Base-T Full Duplex * 100 Base-T Half Duplex * 10 Base-T Full Duplex * 10 Base-T Half Duplex * * We start a new timer now, after a successful auto negotiation status has * been detected. This timer just waits for the link-up bit to get set in * the BMCR of the DP83840. 
When this occurs we print a kernel log message * describing the link type in use and the fact that it is up. * * If a fatal error of some sort is signalled and detected in the interrupt * service routine, and the chip is reset, or the link is ifconfig'd down * and then back up, this entire process repeats itself all over again. */ static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs) { hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); /* Downgrade from full to half duplex. Only possible * via ethtool. */ if (hp->sw_bmcr & BMCR_FULLDPLX) { hp->sw_bmcr &= ~(BMCR_FULLDPLX); happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); return 0; } /* Downgrade from 100 to 10. */ if (hp->sw_bmcr & BMCR_SPEED100) { hp->sw_bmcr &= ~(BMCR_SPEED100); happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); return 0; } /* We've tried everything. */ return -1; } static void display_link_mode(struct happy_meal *hp, void __iomem *tregs) { hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA); netdev_info(hp->dev, "Link is up using %s transceiver at %dMb/s, %s Duplex.\n", hp->tcvr_type == external ? "external" : "internal", hp->sw_lpa & (LPA_100HALF | LPA_100FULL) ? 100 : 10, hp->sw_lpa & (LPA_100FULL | LPA_10FULL) ? "Full" : "Half"); } static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs) { hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); netdev_info(hp->dev, "Link has been forced up using %s transceiver at %dMb/s, %s Duplex.\n", hp->tcvr_type == external ? "external" : "internal", hp->sw_bmcr & BMCR_SPEED100 ? 100 : 10, hp->sw_bmcr & BMCR_FULLDPLX ? "Full" : "Half"); } static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs) { int full; /* All we care about is making sure the bigmac tx_cfg has a * proper duplex setting. */ if (hp->timer_state == arbwait) { hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA); if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL))) goto no_response; if (hp->sw_lpa & LPA_100FULL) full = 1; else if (hp->sw_lpa & LPA_100HALF) full = 0; else if (hp->sw_lpa & LPA_10FULL) full = 1; else full = 0; } else { /* Forcing a link mode. 
*/ hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); if (hp->sw_bmcr & BMCR_FULLDPLX) full = 1; else full = 0; } /* Before changing other bits in the tx_cfg register, and in * general any of other the TX config registers too, you * must: * 1) Clear Enable * 2) Poll with reads until that bit reads back as zero * 3) Make TX configuration changes * 4) Set Enable once more */ hme_write32(hp, hp->bigmacregs + BMAC_TXCFG, hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_ENABLE)); while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE) barrier(); if (full) { hp->happy_flags |= HFLAG_FULL; hme_write32(hp, hp->bigmacregs + BMAC_TXCFG, hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) | BIGMAC_TXCFG_FULLDPLX); } else { hp->happy_flags &= ~(HFLAG_FULL); hme_write32(hp, hp->bigmacregs + BMAC_TXCFG, hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FULLDPLX)); } hme_write32(hp, hp->bigmacregs + BMAC_TXCFG, hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE); return 0; no_response: return 1; } static int is_lucent_phy(struct happy_meal *hp) { void __iomem *tregs = hp->tcvregs; unsigned short mr2, mr3; int ret = 0; mr2 = happy_meal_tcvr_read(hp, tregs, 2); mr3 = happy_meal_tcvr_read(hp, tregs, 3); if ((mr2 & 0xffff) == 0x0180 && ((mr3 & 0xffff) >> 10) == 0x1d) ret = 1; return ret; } /* hp->happy_lock must be held */ static void happy_meal_begin_auto_negotiation(struct happy_meal *hp, void __iomem *tregs, const struct ethtool_link_ksettings *ep) { int timeout; /* Read all of the registers we are interested in now. */ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1); hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2); /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */ hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE); if (!ep || ep->base.autoneg == AUTONEG_ENABLE) { /* Advertise everything we can support. */ if (hp->sw_bmsr & BMSR_10HALF) hp->sw_advertise |= (ADVERTISE_10HALF); else hp->sw_advertise &= ~(ADVERTISE_10HALF); if (hp->sw_bmsr & BMSR_10FULL) hp->sw_advertise |= (ADVERTISE_10FULL); else hp->sw_advertise &= ~(ADVERTISE_10FULL); if (hp->sw_bmsr & BMSR_100HALF) hp->sw_advertise |= (ADVERTISE_100HALF); else hp->sw_advertise &= ~(ADVERTISE_100HALF); if (hp->sw_bmsr & BMSR_100FULL) hp->sw_advertise |= (ADVERTISE_100FULL); else hp->sw_advertise &= ~(ADVERTISE_100FULL); happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise); /* XXX Currently no Happy Meal cards I know off support 100BaseT4, * XXX and this is because the DP83840 does not support it, changes * XXX would need to be made to the tx/rx logic in the driver as well * XXX so I completely skip checking for it in the BMSR for now. */ ASD("Advertising [ %s%s%s%s]\n", hp->sw_advertise & ADVERTISE_10HALF ? "10H " : "", hp->sw_advertise & ADVERTISE_10FULL ? "10F " : "", hp->sw_advertise & ADVERTISE_100HALF ? "100H " : "", hp->sw_advertise & ADVERTISE_100FULL ? "100F " : ""); /* Enable Auto-Negotiation, this is usually on already... */ hp->sw_bmcr |= BMCR_ANENABLE; happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); /* Restart it to make sure it is going. */ hp->sw_bmcr |= BMCR_ANRESTART; happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); /* BMCR_ANRESTART self clears when the process has begun. */ timeout = 64; /* More than enough. 
*/ while (--timeout) { hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); if (!(hp->sw_bmcr & BMCR_ANRESTART)) break; /* got it. */ udelay(10); } if (!timeout) { netdev_err(hp->dev, "Happy Meal would not start auto negotiation BMCR=0x%04x\n", hp->sw_bmcr); netdev_notice(hp->dev, "Performing force link detection.\n"); goto force_link; } else { hp->timer_state = arbwait; } } else { force_link: /* Force the link up, trying first a particular mode. * Either we are here at the request of ethtool or * because the Happy Meal would not start to autoneg. */ /* Disable auto-negotiation in BMCR, enable the duplex and * speed setting, init the timer state machine, and fire it off. */ if (!ep || ep->base.autoneg == AUTONEG_ENABLE) { hp->sw_bmcr = BMCR_SPEED100; } else { if (ep->base.speed == SPEED_100) hp->sw_bmcr = BMCR_SPEED100; else hp->sw_bmcr = 0; if (ep->base.duplex == DUPLEX_FULL) hp->sw_bmcr |= BMCR_FULLDPLX; } happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); if (!is_lucent_phy(hp)) { /* OK, seems we need do disable the transceiver for the first * tick to make sure we get an accurate link state at the * second tick. */ hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG); hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB); happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig); } hp->timer_state = ltrywait; } hp->timer_ticks = 0; hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */ add_timer(&hp->happy_timer); } static void happy_meal_timer(struct timer_list *t) { struct happy_meal *hp = from_timer(hp, t, happy_timer); void __iomem *tregs = hp->tcvregs; int restart_timer = 0; spin_lock_irq(&hp->happy_lock); hp->timer_ticks++; switch(hp->timer_state) { case arbwait: /* Only allow for 5 ticks, thats 10 seconds and much too * long to wait for arbitration to complete. */ if (hp->timer_ticks >= 10) { /* Enter force mode. */ do_force_mode: hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); netdev_notice(hp->dev, "Auto-Negotiation unsuccessful, trying force link mode\n"); hp->sw_bmcr = BMCR_SPEED100; happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); if (!is_lucent_phy(hp)) { /* OK, seems we need do disable the transceiver for the first * tick to make sure we get an accurate link state at the * second tick. */ hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG); hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB); happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig); } hp->timer_state = ltrywait; hp->timer_ticks = 0; restart_timer = 1; } else { /* Anything interesting happen? */ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) { int ret; /* Just what we've been waiting for... */ ret = set_happy_link_modes(hp, tregs); if (ret) { /* Ooops, something bad happened, go to force * mode. * * XXX Broken hubs which don't support 802.3u * XXX auto-negotiation make this happen as well. */ goto do_force_mode; } /* Success, at least so far, advance our state engine. */ hp->timer_state = lupwait; restart_timer = 1; } else { restart_timer = 1; } } break; case lupwait: /* Auto negotiation was successful and we are awaiting a * link up status. I have decided to let this timer run * forever until some sort of error is signalled, reporting * a message to the user at 10 second intervals. */ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); if (hp->sw_bmsr & BMSR_LSTATUS) { /* Wheee, it's up, display the link mode in use and put * the timer to sleep. 
*/ display_link_mode(hp, tregs); hp->timer_state = asleep; restart_timer = 0; } else { if (hp->timer_ticks >= 10) { netdev_notice(hp->dev, "Auto negotiation successful, link still not completely up.\n"); hp->timer_ticks = 0; restart_timer = 1; } else { restart_timer = 1; } } break; case ltrywait: /* Making the timeout here too long can make it take * annoyingly long to attempt all of the link mode * permutations, but then again this is essentially * error recovery code for the most part. */ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG); if (hp->timer_ticks == 1) { if (!is_lucent_phy(hp)) { /* Re-enable transceiver, we'll re-enable the transceiver next * tick, then check link state on the following tick. */ hp->sw_csconfig |= CSCONFIG_TCVDISAB; happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig); } restart_timer = 1; break; } if (hp->timer_ticks == 2) { if (!is_lucent_phy(hp)) { hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB); happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig); } restart_timer = 1; break; } if (hp->sw_bmsr & BMSR_LSTATUS) { /* Force mode selection success. */ display_forced_link_mode(hp, tregs); set_happy_link_modes(hp, tregs); /* XXX error? then what? */ hp->timer_state = asleep; restart_timer = 0; } else { if (hp->timer_ticks >= 4) { /* 6 seconds or so... */ int ret; ret = try_next_permutation(hp, tregs); if (ret == -1) { /* Aieee, tried them all, reset the * chip and try all over again. */ /* Let the user know... */ netdev_notice(hp->dev, "Link down, cable problem?\n"); happy_meal_begin_auto_negotiation(hp, tregs, NULL); goto out; } if (!is_lucent_phy(hp)) { hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG); hp->sw_csconfig |= CSCONFIG_TCVDISAB; happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig); } hp->timer_ticks = 0; restart_timer = 1; } else { restart_timer = 1; } } break; case asleep: default: /* Can't happens.... */ netdev_err(hp->dev, "Aieee, link timer is asleep but we got one anyways!\n"); restart_timer = 0; hp->timer_ticks = 0; hp->timer_state = asleep; /* foo on you */ break; } if (restart_timer) { hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */ add_timer(&hp->happy_timer); } out: spin_unlock_irq(&hp->happy_lock); } #define TX_RESET_TRIES 32 #define RX_RESET_TRIES 32 /* hp->happy_lock must be held */ static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs) { int tries = TX_RESET_TRIES; HMD("reset...\n"); /* Would you like to try our SMCC Delux? */ hme_write32(hp, bregs + BMAC_TXSWRESET, 0); while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries) udelay(20); /* Lettuce, tomato, buggy hardware (no extra charge)? */ if (!tries) netdev_err(hp->dev, "Transceiver BigMac ATTACK!"); /* Take care. */ HMD("done\n"); } /* hp->happy_lock must be held */ static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs) { int tries = RX_RESET_TRIES; HMD("reset...\n"); /* We have a special on GNU/Viking hardware bugs today. */ hme_write32(hp, bregs + BMAC_RXSWRESET, 0); while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries) udelay(20); /* Will that be all? */ if (!tries) netdev_err(hp->dev, "Receiver BigMac ATTACK!\n"); /* Don't forget your vik_1137125_wa. Have a nice day. 
*/ HMD("done\n"); } #define STOP_TRIES 16 /* hp->happy_lock must be held */ static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs) { int tries = STOP_TRIES; HMD("reset...\n"); /* We're consolidating our STB products, it's your lucky day. */ hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL); while (hme_read32(hp, gregs + GREG_SWRESET) && --tries) udelay(20); /* Come back next week when we are "Sun Microelectronics". */ if (!tries) netdev_err(hp->dev, "Fry guys.\n"); /* Remember: "Different name, same old buggy as shit hardware." */ HMD("done\n"); } /* hp->happy_lock must be held */ static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs) { struct net_device_stats *stats = &hp->dev->stats; stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR); hme_write32(hp, bregs + BMAC_RCRCECTR, 0); stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR); hme_write32(hp, bregs + BMAC_UNALECTR, 0); stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR); hme_write32(hp, bregs + BMAC_GLECTR, 0); stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR); stats->collisions += (hme_read32(hp, bregs + BMAC_EXCTR) + hme_read32(hp, bregs + BMAC_LTCTR)); hme_write32(hp, bregs + BMAC_EXCTR, 0); hme_write32(hp, bregs + BMAC_LTCTR, 0); } /* Only Sun can take such nice parts and fuck up the programming interface * like this. Good job guys... */ #define TCVR_RESET_TRIES 16 /* It should reset quickly */ #define TCVR_UNISOLATE_TRIES 32 /* Dis-isolation can take longer. */ /* hp->happy_lock must be held */ static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs) { u32 tconfig; int result, tries = TCVR_RESET_TRIES; tconfig = hme_read32(hp, tregs + TCVR_CFG); ASD("tcfg=%08x\n", tconfig); if (hp->tcvr_type == external) { hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT)); hp->tcvr_type = internal; hp->paddr = TCV_PADDR_ITX; happy_meal_tcvr_write(hp, tregs, MII_BMCR, (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE)); result = happy_meal_tcvr_read(hp, tregs, MII_BMCR); if (result == TCVR_FAILURE) { ASD("phyread_fail\n"); return -1; } ASD("external: ISOLATE, phyread_ok, PSELECT\n"); hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT); hp->tcvr_type = external; hp->paddr = TCV_PADDR_ETX; } else { if (tconfig & TCV_CFG_MDIO1) { hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT)); happy_meal_tcvr_write(hp, tregs, MII_BMCR, (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE)); result = happy_meal_tcvr_read(hp, tregs, MII_BMCR); if (result == TCVR_FAILURE) { ASD("phyread_fail>\n"); return -1; } ASD("internal: PSELECT, ISOLATE, phyread_ok, ~PSELECT\n"); hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT))); hp->tcvr_type = internal; hp->paddr = TCV_PADDR_ITX; } } ASD("BMCR_RESET...\n"); happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET); while (--tries) { result = happy_meal_tcvr_read(hp, tregs, MII_BMCR); if (result == TCVR_FAILURE) return -1; hp->sw_bmcr = result; if (!(result & BMCR_RESET)) break; udelay(20); } if (!tries) { ASD("BMCR RESET FAILED!\n"); return -1; } ASD("RESET_OK\n"); /* Get fresh copies of the PHY registers. 
*/ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1); hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2); hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE); ASD("UNISOLATE...\n"); hp->sw_bmcr &= ~(BMCR_ISOLATE); happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); tries = TCVR_UNISOLATE_TRIES; while (--tries) { result = happy_meal_tcvr_read(hp, tregs, MII_BMCR); if (result == TCVR_FAILURE) return -1; if (!(result & BMCR_ISOLATE)) break; udelay(20); } if (!tries) { ASD("UNISOLATE FAILED!\n"); return -1; } ASD("SUCCESS and CSCONFIG_DFBYPASS\n"); if (!is_lucent_phy(hp)) { result = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG); happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS)); } return 0; } /* Figure out whether we have an internal or external transceiver. * * hp->happy_lock must be held */ static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs) { unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG); u32 reread = hme_read32(hp, tregs + TCVR_CFG); ASD("tcfg=%08lx\n", tconfig); if (reread & TCV_CFG_MDIO1) { hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT); hp->paddr = TCV_PADDR_ETX; hp->tcvr_type = external; ASD("not polling, external\n"); } else { if (reread & TCV_CFG_MDIO0) { hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT)); hp->paddr = TCV_PADDR_ITX; hp->tcvr_type = internal; ASD("not polling, internal\n"); } else { netdev_err(hp->dev, "Transceiver and a coke please."); hp->tcvr_type = none; /* Grrr... */ ASD("not polling, none\n"); } } } /* The receive ring buffers are a bit tricky to get right. Here goes... * * The buffers we dma into must be 64 byte aligned. So we use a special * alloc_skb() routine for the happy meal to allocate 64 bytes more than * we really need. * * We use skb_reserve() to align the data block we get in the skb. We * also program the etxregs->cfg register to use an offset of 2. This * imperical constant plus the ethernet header size will always leave * us with a nicely aligned ip header once we pass things up to the * protocol layers. * * The numbers work out to: * * Max ethernet frame size 1518 * Ethernet header size 14 * Happy Meal base offset 2 * * Say a skb data area is at 0xf001b010, and its size alloced is * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes. * * First our alloc_skb() routine aligns the data base to a 64 byte * boundary. We now have 0xf001b040 as our skb data address. We * plug this into the receive descriptor address. * * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset. * So now the data we will end up looking at starts at 0xf001b042. When * the packet arrives, we will check out the size received and subtract * this from the skb->length. Then we just pass the packet up to the * protocols as is, and allocate a new skb to replace this slot we have * just received from. * * The ethernet layer will strip the ether header from the front of the * skb we just sent to it, this leaves us with the ip header sitting * nicely aligned at 0xf001b050. Also, for tcp and udp packets the * Happy Meal has even checksummed the tcp/udp data for us. The 16 * bit checksum is obtained from the low bits of the receive descriptor * flags, thus: * * skb->csum = rxd->rx_flags & 0xffff; * skb->ip_summed = CHECKSUM_COMPLETE; * * before sending off the skb to the protocols, and we are good as gold. 
*/ static void happy_meal_clean_rings(struct happy_meal *hp) { int i; for (i = 0; i < RX_RING_SIZE; i++) { if (hp->rx_skbs[i] != NULL) { struct sk_buff *skb = hp->rx_skbs[i]; struct happy_meal_rxd *rxd; u32 dma_addr; rxd = &hp->happy_block->happy_meal_rxd[i]; dma_addr = hme_read_desc32(hp, &rxd->rx_addr); dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); hp->rx_skbs[i] = NULL; } } for (i = 0; i < TX_RING_SIZE; i++) { if (hp->tx_skbs[i] != NULL) { struct sk_buff *skb = hp->tx_skbs[i]; struct happy_meal_txd *txd; u32 dma_addr; int frag; hp->tx_skbs[i] = NULL; for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { txd = &hp->happy_block->happy_meal_txd[i]; dma_addr = hme_read_desc32(hp, &txd->tx_addr); if (!frag) dma_unmap_single(hp->dma_dev, dma_addr, (hme_read_desc32(hp, &txd->tx_flags) & TXFLAG_SIZE), DMA_TO_DEVICE); else dma_unmap_page(hp->dma_dev, dma_addr, (hme_read_desc32(hp, &txd->tx_flags) & TXFLAG_SIZE), DMA_TO_DEVICE); if (frag != skb_shinfo(skb)->nr_frags) i++; } dev_kfree_skb_any(skb); } } } /* hp->happy_lock must be held */ static void happy_meal_init_rings(struct happy_meal *hp) { struct hmeal_init_block *hb = hp->happy_block; int i; HMD("counters to zero\n"); hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0; /* Free any skippy bufs left around in the rings. */ happy_meal_clean_rings(hp); /* Now get new skippy bufs for the receive ring. */ HMD("init rxring\n"); for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb; u32 mapping; skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); if (!skb) { hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0); continue; } hp->rx_skbs[i] = skb; /* Because we reserve afterwards. */ skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(hp->dma_dev, mapping)) { dev_kfree_skb_any(skb); hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0); continue; } hme_write_rxd(hp, &hb->happy_meal_rxd[i], (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)), mapping); skb_reserve(skb, RX_OFFSET); } HMD("init txring\n"); for (i = 0; i < TX_RING_SIZE; i++) hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0); HMD("done\n"); } /* hp->happy_lock must be held */ static int happy_meal_init(struct happy_meal *hp) { const unsigned char *e = &hp->dev->dev_addr[0]; void __iomem *gregs = hp->gregs; void __iomem *etxregs = hp->etxregs; void __iomem *erxregs = hp->erxregs; void __iomem *bregs = hp->bigmacregs; void __iomem *tregs = hp->tcvregs; const char *bursts = "64"; u32 regtmp, rxcfg; /* If auto-negotiation timer is running, kill it. */ del_timer(&hp->happy_timer); HMD("happy_flags[%08x]\n", hp->happy_flags); if (!(hp->happy_flags & HFLAG_INIT)) { HMD("set HFLAG_INIT\n"); hp->happy_flags |= HFLAG_INIT; happy_meal_get_counters(hp, bregs); } /* Stop transmitter and receiver. */ HMD("to happy_meal_stop\n"); happy_meal_stop(hp, gregs); /* Alloc and reset the tx/rx descriptor chains. */ HMD("to happy_meal_init_rings\n"); happy_meal_init_rings(hp); /* See if we can enable the MIF frame on this card to speak to the DP83840. */ if (hp->happy_flags & HFLAG_FENABLE) { HMD("use frame old[%08x]\n", hme_read32(hp, tregs + TCVR_CFG)); hme_write32(hp, tregs + TCVR_CFG, hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE)); } else { HMD("use bitbang old[%08x]\n", hme_read32(hp, tregs + TCVR_CFG)); hme_write32(hp, tregs + TCVR_CFG, hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE); } /* Check the state of the transceiver. 
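 * happy_meal_transceiver_check() latches internal/external/none into
 * hp->tcvr_type, which the switch statement just below keys off of.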
*/ HMD("to happy_meal_transceiver_check\n"); happy_meal_transceiver_check(hp, tregs); /* Put the Big Mac into a sane state. */ switch(hp->tcvr_type) { case none: /* Cannot operate if we don't know the transceiver type! */ HMD("AAIEEE no transceiver type, EAGAIN\n"); return -EAGAIN; case internal: /* Using the MII buffers. */ HMD("internal, using MII\n"); hme_write32(hp, bregs + BMAC_XIFCFG, 0); break; case external: /* Not using the MII, disable it. */ HMD("external, disable MII\n"); hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB); break; } if (happy_meal_tcvr_reset(hp, tregs)) return -EAGAIN; /* Reset the Happy Meal Big Mac transceiver and the receiver. */ HMD("tx/rx reset\n"); happy_meal_tx_reset(hp, bregs); happy_meal_rx_reset(hp, bregs); /* Set jam size and inter-packet gaps to reasonable defaults. */ hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE); hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1); hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2); /* Load up the MAC address and random seed. */ /* The docs recommend to use the 10LSB of our MAC here. */ hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff)); hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5])); hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3])); hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1])); if ((hp->dev->flags & IFF_ALLMULTI) || (netdev_mc_count(hp->dev) > 64)) { hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff); hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff); hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff); hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff); } else if ((hp->dev->flags & IFF_PROMISC) == 0) { u16 hash_table[4]; struct netdev_hw_addr *ha; u32 crc; memset(hash_table, 0, sizeof(hash_table)); netdev_for_each_mc_addr(ha, hp->dev) { crc = ether_crc_le(6, ha->addr); crc >>= 26; hash_table[crc >> 4] |= 1 << (crc & 0xf); } hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]); hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]); hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]); hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]); } else { hme_write32(hp, bregs + BMAC_HTABLE3, 0); hme_write32(hp, bregs + BMAC_HTABLE2, 0); hme_write32(hp, bregs + BMAC_HTABLE1, 0); hme_write32(hp, bregs + BMAC_HTABLE0, 0); } /* Set the RX and TX ring ptrs. */ HMD("ring ptrs rxr[%08x] txr[%08x]\n", ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)), ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))); hme_write32(hp, erxregs + ERX_RING, ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))); hme_write32(hp, etxregs + ETX_RING, ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))); /* Parity issues in the ERX unit of some HME revisions can cause some * registers to not be written unless their parity is even. Detect such * lost writes and simply rewrite with a low bit set (which will be ignored * since the rxring needs to be 2K aligned). */ if (hme_read32(hp, erxregs + ERX_RING) != ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))) hme_write32(hp, erxregs + ERX_RING, ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)) | 0x4); /* Set the supported burst sizes. */ #ifndef CONFIG_SPARC /* It is always PCI and can handle 64byte bursts. 
*/ hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64); #else if ((hp->happy_bursts & DMA_BURST64) && ((hp->happy_flags & HFLAG_PCI) != 0 #ifdef CONFIG_SBUS || sbus_can_burst64() #endif || 0)) { u32 gcfg = GREG_CFG_BURST64; /* I have no idea if I should set the extended * transfer mode bit for Cheerio, so for now I * do not. -DaveM */ #ifdef CONFIG_SBUS if ((hp->happy_flags & HFLAG_PCI) == 0) { struct platform_device *op = hp->happy_dev; if (sbus_can_dma_64bit()) { sbus_set_sbus64(&op->dev, hp->happy_bursts); gcfg |= GREG_CFG_64BIT; } } #endif bursts = "64"; hme_write32(hp, gregs + GREG_CFG, gcfg); } else if (hp->happy_bursts & DMA_BURST32) { bursts = "32"; hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32); } else if (hp->happy_bursts & DMA_BURST16) { bursts = "16"; hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16); } else { bursts = "XXX"; hme_write32(hp, gregs + GREG_CFG, 0); } #endif /* CONFIG_SPARC */ HMD("old[%08x] bursts<%s>\n", hme_read32(hp, gregs + GREG_CFG), bursts); /* Turn off interrupts we do not want to hear. */ hme_write32(hp, gregs + GREG_IMASK, (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP | GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR)); /* Set the transmit ring buffer size. */ HMD("tx rsize=%d oreg[%08x]\n", (int)TX_RING_SIZE, hme_read32(hp, etxregs + ETX_RSIZE)); hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1); /* Enable transmitter DVMA. */ HMD("tx dma enable old[%08x]\n", hme_read32(hp, etxregs + ETX_CFG)); hme_write32(hp, etxregs + ETX_CFG, hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE); /* This chip really rots, for the receiver sometimes when you * write to its control registers not all the bits get there * properly. I cannot think of a sane way to provide complete * coverage for this hardware bug yet. */ HMD("erx regs bug old[%08x]\n", hme_read32(hp, erxregs + ERX_CFG)); hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET)); regtmp = hme_read32(hp, erxregs + ERX_CFG); hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET)); if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) { netdev_err(hp->dev, "Eieee, rx config register gets greasy fries.\n"); netdev_err(hp->dev, "Trying to set %08x, reread gives %08x\n", ERX_CFG_DEFAULT(RX_OFFSET), regtmp); /* XXX Should return failure here... */ } /* Enable Big Mac hash table filter. */ HMD("enable hash rx_cfg_old[%08x]\n", hme_read32(hp, bregs + BMAC_RXCFG)); rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME; if (hp->dev->flags & IFF_PROMISC) rxcfg |= BIGMAC_RXCFG_PMISC; hme_write32(hp, bregs + BMAC_RXCFG, rxcfg); /* Let the bits settle in the chip. */ udelay(10); /* Ok, configure the Big Mac transmitter. */ HMD("BIGMAC init\n"); regtmp = 0; if (hp->happy_flags & HFLAG_FULL) regtmp |= BIGMAC_TXCFG_FULLDPLX; /* Don't turn on the "don't give up" bit for now. It could cause hme * to deadlock with the PHY if a Jabber occurs. */ hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/); /* Give up after 16 TX attempts. */ hme_write32(hp, bregs + BMAC_ALIMIT, 16); /* Enable the output drivers no matter what. */ regtmp = BIGMAC_XCFG_ODENABLE; /* If card can do lance mode, enable it. */ if (hp->happy_flags & HFLAG_LANCE) regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE; /* Disable the MII buffers if using external transceiver. */ if (hp->tcvr_type == external) regtmp |= BIGMAC_XCFG_MIIDISAB; HMD("XIF config old[%08x]\n", hme_read32(hp, bregs + BMAC_XIFCFG)); hme_write32(hp, bregs + BMAC_XIFCFG, regtmp); /* Start things up. 
*/ HMD("tx old[%08x] and rx [%08x] ON!\n", hme_read32(hp, bregs + BMAC_TXCFG), hme_read32(hp, bregs + BMAC_RXCFG)); /* Set larger TX/RX size to allow for 802.1q */ hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8); hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8); hme_write32(hp, bregs + BMAC_TXCFG, hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE); hme_write32(hp, bregs + BMAC_RXCFG, hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE); /* Get the autonegotiation started, and the watch timer ticking. */ happy_meal_begin_auto_negotiation(hp, tregs, NULL); /* Success. */ return 0; } /* hp->happy_lock must be held */ static void happy_meal_set_initial_advertisement(struct happy_meal *hp) { void __iomem *tregs = hp->tcvregs; void __iomem *bregs = hp->bigmacregs; void __iomem *gregs = hp->gregs; happy_meal_stop(hp, gregs); if (hp->happy_flags & HFLAG_FENABLE) hme_write32(hp, tregs + TCVR_CFG, hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE)); else hme_write32(hp, tregs + TCVR_CFG, hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE); happy_meal_transceiver_check(hp, tregs); switch(hp->tcvr_type) { case none: return; case internal: hme_write32(hp, bregs + BMAC_XIFCFG, 0); break; case external: hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB); break; } if (happy_meal_tcvr_reset(hp, tregs)) return; /* Latch PHY registers as of now. */ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE); /* Advertise everything we can support. */ if (hp->sw_bmsr & BMSR_10HALF) hp->sw_advertise |= (ADVERTISE_10HALF); else hp->sw_advertise &= ~(ADVERTISE_10HALF); if (hp->sw_bmsr & BMSR_10FULL) hp->sw_advertise |= (ADVERTISE_10FULL); else hp->sw_advertise &= ~(ADVERTISE_10FULL); if (hp->sw_bmsr & BMSR_100HALF) hp->sw_advertise |= (ADVERTISE_100HALF); else hp->sw_advertise &= ~(ADVERTISE_100HALF); if (hp->sw_bmsr & BMSR_100FULL) hp->sw_advertise |= (ADVERTISE_100FULL); else hp->sw_advertise &= ~(ADVERTISE_100FULL); /* Update the PHY advertisement register. */ happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise); } /* Once status is latched (by happy_meal_interrupt) it is cleared by * the hardware, so we cannot re-read it and get a correct value. * * hp->happy_lock must be held */ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status) { int reset = 0; /* Only print messages for non-counter related interrupts. */ if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND | GREG_STAT_MAXPKTERR | GREG_STAT_RXERR | GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR | GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR | GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR | GREG_STAT_SLVPERR)) netdev_err(hp->dev, "Error interrupt for happy meal, status = %08x\n", status); if (status & GREG_STAT_RFIFOVF) { /* Receive FIFO overflow is harmless and the hardware will take care of it, just some packets are lost. Who cares. */ netdev_dbg(hp->dev, "Happy Meal receive FIFO overflow.\n"); } if (status & GREG_STAT_STSTERR) { /* BigMAC SQE link test failed. */ netdev_err(hp->dev, "Happy Meal BigMAC SQE test failed.\n"); reset = 1; } if (status & GREG_STAT_TFIFO_UND) { /* Transmit FIFO underrun, again DMA error likely. */ netdev_err(hp->dev, "Happy Meal transmitter FIFO underrun, DMA error.\n"); reset = 1; } if (status & GREG_STAT_MAXPKTERR) { /* Driver error, tried to transmit something larger * than ethernet max mtu. 
*/ netdev_err(hp->dev, "Happy Meal MAX Packet size error.\n"); reset = 1; } if (status & GREG_STAT_NORXD) { /* This is harmless, it just means the system is * quite loaded and the incoming packet rate was * faster than the interrupt handler could keep up * with. */ netdev_info(hp->dev, "Happy Meal out of receive descriptors, packet dropped.\n"); } if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) { /* All sorts of DMA receive errors. */ netdev_err(hp->dev, "Happy Meal rx DMA errors [ %s%s%s]\n", status & GREG_STAT_RXERR ? "GenericError " : "", status & GREG_STAT_RXPERR ? "ParityError " : "", status & GREG_STAT_RXTERR ? "RxTagBotch " : ""); reset = 1; } if (status & GREG_STAT_EOPERR) { /* Driver bug, didn't set EOP bit in tx descriptor given * to the happy meal. */ netdev_err(hp->dev, "EOP not set in happy meal transmit descriptor!\n"); reset = 1; } if (status & GREG_STAT_MIFIRQ) { /* MIF signalled an interrupt, were we polling it? */ netdev_err(hp->dev, "Happy Meal MIF interrupt.\n"); } if (status & (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) { /* All sorts of transmit DMA errors. */ netdev_err(hp->dev, "Happy Meal tx DMA errors [ %s%s%s%s]\n", status & GREG_STAT_TXEACK ? "GenericError " : "", status & GREG_STAT_TXLERR ? "LateError " : "", status & GREG_STAT_TXPERR ? "ParityError " : "", status & GREG_STAT_TXTERR ? "TagBotch " : ""); reset = 1; } if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) { /* Bus or parity error when cpu accessed happy meal registers * or it's internal FIFO's. Should never see this. */ netdev_err(hp->dev, "Happy Meal register access SBUS slave (%s) error.\n", (status & GREG_STAT_SLVPERR) ? "parity" : "generic"); reset = 1; } if (reset) { netdev_notice(hp->dev, "Resetting...\n"); happy_meal_init(hp); return 1; } return 0; } /* hp->happy_lock must be held */ static void happy_meal_tx(struct happy_meal *hp) { struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0]; struct happy_meal_txd *this; struct net_device *dev = hp->dev; int elem; elem = hp->tx_old; while (elem != hp->tx_new) { struct sk_buff *skb; u32 flags, dma_addr, dma_len; int frag; netdev_vdbg(hp->dev, "TX[%d]\n", elem); this = &txbase[elem]; flags = hme_read_desc32(hp, &this->tx_flags); if (flags & TXFLAG_OWN) break; skb = hp->tx_skbs[elem]; if (skb_shinfo(skb)->nr_frags) { int last; last = elem + skb_shinfo(skb)->nr_frags; last &= (TX_RING_SIZE - 1); flags = hme_read_desc32(hp, &txbase[last].tx_flags); if (flags & TXFLAG_OWN) break; } hp->tx_skbs[elem] = NULL; dev->stats.tx_bytes += skb->len; for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { dma_addr = hme_read_desc32(hp, &this->tx_addr); dma_len = hme_read_desc32(hp, &this->tx_flags); dma_len &= TXFLAG_SIZE; if (!frag) dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE); else dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE); elem = NEXT_TX(elem); this = &txbase[elem]; } dev_consume_skb_irq(skb); dev->stats.tx_packets++; } hp->tx_old = elem; if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1)) netif_wake_queue(dev); } /* Originally I used to handle the allocation failure by just giving back just * that one ring buffer to the happy meal. Problem is that usually when that * condition is triggered, the happy meal expects you to do something reasonable * with all of the packets it has DMA'd in. So now I just drop the entire * ring when we cannot get a new skb and give them all back to the happy meal, * maybe things will be "happier" now. 
* * hp->happy_lock must be held */ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) { struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0]; struct happy_meal_rxd *this; int elem = hp->rx_new, drops = 0; u32 flags; this = &rxbase[elem]; while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) { struct sk_buff *skb; int len = flags >> 16; u16 csum = flags & RXFLAG_CSUM; u32 dma_addr = hme_read_desc32(hp, &this->rx_addr); /* Check for errors. */ if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) { netdev_vdbg(dev, "RX[%d ERR(%08x)]", elem, flags); dev->stats.rx_errors++; if (len < ETH_ZLEN) dev->stats.rx_length_errors++; if (len & (RXFLAG_OVERFLOW >> 16)) { dev->stats.rx_over_errors++; dev->stats.rx_fifo_errors++; } /* Return it to the Happy meal. */ drop_it: dev->stats.rx_dropped++; hme_write_rxd(hp, this, (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), dma_addr); goto next; } skb = hp->rx_skbs[elem]; if (len > RX_COPY_THRESHOLD) { struct sk_buff *new_skb; u32 mapping; /* Now refill the entry, if we can. */ new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); if (new_skb == NULL) { drops++; goto drop_it; } skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); mapping = dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) { dev_kfree_skb_any(new_skb); drops++; goto drop_it; } dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); hp->rx_skbs[elem] = new_skb; hme_write_rxd(hp, this, (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), mapping); skb_reserve(new_skb, RX_OFFSET); /* Trim the original skb for the netif. */ skb_trim(skb, len); } else { struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2); if (copy_skb == NULL) { drops++; goto drop_it; } skb_reserve(copy_skb, 2); skb_put(copy_skb, len); dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE); skb_copy_from_linear_data(skb, copy_skb->data, len); dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE); /* Reuse original ring buffer. */ hme_write_rxd(hp, this, (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), dma_addr); skb = copy_skb; } /* This card is _fucking_ hot... 
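 * The low 16 bits of the rx descriptor flags carry the checksum the
 * hardware computed; the line below converts it to network byte order,
 * complements it and unfolds it into the __wsum value the stack
 * expects alongside CHECKSUM_COMPLETE.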
*/ skb->csum = csum_unfold(~(__force __sum16)htons(csum)); skb->ip_summed = CHECKSUM_COMPLETE; netdev_vdbg(dev, "RX[%d len=%d csum=%4x]", elem, len, csum); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; next: elem = NEXT_RX(elem); this = &rxbase[elem]; } hp->rx_new = elem; if (drops) netdev_info(hp->dev, "Memory squeeze, deferring packet.\n"); } static irqreturn_t happy_meal_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct happy_meal *hp = netdev_priv(dev); u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT); HMD("status=%08x\n", happy_status); if (!happy_status) return IRQ_NONE; spin_lock(&hp->happy_lock); if (happy_status & GREG_STAT_ERRORS) { if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status)) goto out; } if (happy_status & GREG_STAT_TXALL) happy_meal_tx(hp); if (happy_status & GREG_STAT_RXTOHOST) happy_meal_rx(hp, dev); HMD("done\n"); out: spin_unlock(&hp->happy_lock); return IRQ_HANDLED; } static int happy_meal_open(struct net_device *dev) { struct happy_meal *hp = netdev_priv(dev); int res; res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED, dev->name, dev); if (res) { netdev_err(dev, "Can't order irq %d to go.\n", hp->irq); return res; } HMD("to happy_meal_init\n"); spin_lock_irq(&hp->happy_lock); res = happy_meal_init(hp); spin_unlock_irq(&hp->happy_lock); if (res) free_irq(hp->irq, dev); return res; } static int happy_meal_close(struct net_device *dev) { struct happy_meal *hp = netdev_priv(dev); spin_lock_irq(&hp->happy_lock); happy_meal_stop(hp, hp->gregs); happy_meal_clean_rings(hp); /* If auto-negotiation timer is running, kill it. */ del_timer(&hp->happy_timer); spin_unlock_irq(&hp->happy_lock); free_irq(hp->irq, dev); return 0; } static void happy_meal_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct happy_meal *hp = netdev_priv(dev); netdev_err(dev, "transmit timed out, resetting\n"); tx_dump_log(); netdev_err(dev, "Happy Status %08x TX[%08x:%08x]\n", hme_read32(hp, hp->gregs + GREG_STAT), hme_read32(hp, hp->etxregs + ETX_CFG), hme_read32(hp, hp->bigmacregs + BMAC_TXCFG)); spin_lock_irq(&hp->happy_lock); happy_meal_init(hp); spin_unlock_irq(&hp->happy_lock); netif_wake_queue(dev); } static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping, u32 first_len, u32 first_entry, u32 entry) { struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0]; dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE); first_entry = NEXT_TX(first_entry); while (first_entry != entry) { struct happy_meal_txd *this = &txbase[first_entry]; u32 addr, len; addr = hme_read_desc32(hp, &this->tx_addr); len = hme_read_desc32(hp, &this->tx_flags); len &= TXFLAG_SIZE; dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE); } } static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct happy_meal *hp = netdev_priv(dev); int entry; u32 tx_flags; tx_flags = TXFLAG_OWN; if (skb->ip_summed == CHECKSUM_PARTIAL) { const u32 csum_start_off = skb_checksum_start_offset(skb); const u32 csum_stuff_off = csum_start_off + skb->csum_offset; tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE | ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) | ((csum_stuff_off << 20) & TXFLAG_CSLOCATION)); } spin_lock_irq(&hp->happy_lock); if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) { netif_stop_queue(dev); spin_unlock_irq(&hp->happy_lock); netdev_err(dev, "BUG! 
Tx Ring full when queue awake!\n"); return NETDEV_TX_BUSY; } entry = hp->tx_new; netdev_vdbg(dev, "SX<l[%d]e[%d]>\n", skb->len, entry); hp->tx_skbs[entry] = skb; if (skb_shinfo(skb)->nr_frags == 0) { u32 mapping, len; len = skb->len; mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) goto out_dma_error; tx_flags |= (TXFLAG_SOP | TXFLAG_EOP); hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], (tx_flags | (len & TXFLAG_SIZE)), mapping); entry = NEXT_TX(entry); } else { u32 first_len, first_mapping; int frag, first_entry = entry; /* We must give this initial chunk to the device last. * Otherwise we could race with the device. */ first_len = skb_headlen(skb); first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping))) goto out_dma_error; entry = NEXT_TX(entry); for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; u32 len, mapping, this_txflags; len = skb_frag_size(this_frag); mapping = skb_frag_dma_map(hp->dma_dev, this_frag, 0, len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) { unmap_partial_tx_skb(hp, first_mapping, first_len, first_entry, entry); goto out_dma_error; } this_txflags = tx_flags; if (frag == skb_shinfo(skb)->nr_frags - 1) this_txflags |= TXFLAG_EOP; hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], (this_txflags | (len & TXFLAG_SIZE)), mapping); entry = NEXT_TX(entry); } hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry], (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)), first_mapping); } hp->tx_new = entry; if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1)) netif_stop_queue(dev); /* Get it going. */ hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP); spin_unlock_irq(&hp->happy_lock); tx_add_log(hp, TXLOG_ACTION_TXMIT, 0); return NETDEV_TX_OK; out_dma_error: hp->tx_skbs[hp->tx_new] = NULL; spin_unlock_irq(&hp->happy_lock); dev_kfree_skb_any(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; } static struct net_device_stats *happy_meal_get_stats(struct net_device *dev) { struct happy_meal *hp = netdev_priv(dev); spin_lock_irq(&hp->happy_lock); happy_meal_get_counters(hp, hp->bigmacregs); spin_unlock_irq(&hp->happy_lock); return &dev->stats; } static void happy_meal_set_multicast(struct net_device *dev) { struct happy_meal *hp = netdev_priv(dev); void __iomem *bregs = hp->bigmacregs; struct netdev_hw_addr *ha; u32 crc; spin_lock_irq(&hp->happy_lock); if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff); hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff); hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff); hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff); } else if (dev->flags & IFF_PROMISC) { hme_write32(hp, bregs + BMAC_RXCFG, hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC); } else { u16 hash_table[4]; memset(hash_table, 0, sizeof(hash_table)); netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); crc >>= 26; hash_table[crc >> 4] |= 1 << (crc & 0xf); } hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]); hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]); hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]); hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]); } spin_unlock_irq(&hp->happy_lock); } /* Ethtool support... 
*/ static int hme_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct happy_meal *hp = netdev_priv(dev); u32 speed; u32 supported; supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); /* XXX hardcoded stuff for now */ cmd->base.port = PORT_TP; /* XXX no MII support */ cmd->base.phy_address = 0; /* XXX fixed PHYAD */ /* Record PHY settings. */ spin_lock_irq(&hp->happy_lock); hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR); hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA); spin_unlock_irq(&hp->happy_lock); if (hp->sw_bmcr & BMCR_ANENABLE) { cmd->base.autoneg = AUTONEG_ENABLE; speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ? SPEED_100 : SPEED_10); if (speed == SPEED_100) cmd->base.duplex = (hp->sw_lpa & (LPA_100FULL)) ? DUPLEX_FULL : DUPLEX_HALF; else cmd->base.duplex = (hp->sw_lpa & (LPA_10FULL)) ? DUPLEX_FULL : DUPLEX_HALF; } else { cmd->base.autoneg = AUTONEG_DISABLE; speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; cmd->base.duplex = (hp->sw_bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; } cmd->base.speed = speed; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); return 0; } static int hme_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct happy_meal *hp = netdev_priv(dev); /* Verify the settings we care about. */ if (cmd->base.autoneg != AUTONEG_ENABLE && cmd->base.autoneg != AUTONEG_DISABLE) return -EINVAL; if (cmd->base.autoneg == AUTONEG_DISABLE && ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) || (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL))) return -EINVAL; /* Ok, do it to it. */ spin_lock_irq(&hp->happy_lock); del_timer(&hp->happy_timer); happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd); spin_unlock_irq(&hp->happy_lock); return 0; } static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct happy_meal *hp = netdev_priv(dev); strscpy(info->driver, DRV_NAME, sizeof(info->driver)); if (hp->happy_flags & HFLAG_PCI) { struct pci_dev *pdev = hp->happy_dev; strscpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info)); } #ifdef CONFIG_SBUS else { const struct linux_prom_registers *regs; struct platform_device *op = hp->happy_dev; regs = of_get_property(op->dev.of_node, "regs", NULL); if (regs) snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d", regs->which_io); } #endif } static u32 hme_get_link(struct net_device *dev) { struct happy_meal *hp = netdev_priv(dev); spin_lock_irq(&hp->happy_lock); hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR); spin_unlock_irq(&hp->happy_lock); return hp->sw_bmsr & BMSR_LSTATUS; } static const struct ethtool_ops hme_ethtool_ops = { .get_drvinfo = hme_get_drvinfo, .get_link = hme_get_link, .get_link_ksettings = hme_get_link_ksettings, .set_link_ksettings = hme_set_link_ksettings, }; #ifdef CONFIG_SBUS /* Given a happy meal sbus device, find it's quattro parent. * If none exist, allocate and return a new one. * * Return NULL on failure. 
*/ static struct quattro *quattro_sbus_find(struct platform_device *child) { struct device *parent = child->dev.parent; struct platform_device *op; struct quattro *qp; op = to_platform_device(parent); qp = platform_get_drvdata(op); if (qp) return qp; qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) return NULL; qp->quattro_dev = child; qp->next = qfe_sbus_list; qfe_sbus_list = qp; platform_set_drvdata(op, qp); return qp; } #endif /* CONFIG_SBUS */ #ifdef CONFIG_PCI static struct quattro *quattro_pci_find(struct pci_dev *pdev) { int i; struct pci_dev *bdev = pdev->bus->self; struct quattro *qp; if (!bdev) return ERR_PTR(-ENODEV); for (qp = qfe_pci_list; qp != NULL; qp = qp->next) { struct pci_dev *qpdev = qp->quattro_dev; if (qpdev == bdev) return qp; } qp = kmalloc(sizeof(struct quattro), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); for (i = 0; i < 4; i++) qp->happy_meals[i] = NULL; qp->quattro_dev = bdev; qp->next = qfe_pci_list; qfe_pci_list = qp; /* No range tricks necessary on PCI. */ qp->nranges = 0; return qp; } #endif /* CONFIG_PCI */ static const struct net_device_ops hme_netdev_ops = { .ndo_open = happy_meal_open, .ndo_stop = happy_meal_close, .ndo_start_xmit = happy_meal_start_xmit, .ndo_tx_timeout = happy_meal_tx_timeout, .ndo_get_stats = happy_meal_get_stats, .ndo_set_rx_mode = happy_meal_set_multicast, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; #ifdef CONFIG_PCI static int is_quattro_p(struct pci_dev *pdev) { struct pci_dev *busdev = pdev->bus->self; struct pci_dev *this_pdev; int n_hmes; if (!busdev || busdev->vendor != PCI_VENDOR_ID_DEC || busdev->device != PCI_DEVICE_ID_DEC_21153) return 0; n_hmes = 0; list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) { if (this_pdev->vendor == PCI_VENDOR_ID_SUN && this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL) n_hmes++; } if (n_hmes != 4) return 0; return 1; } /* Fetch MAC address from vital product data of PCI ROM. */ static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr) { int this_offset; for (this_offset = 0x20; this_offset < len; this_offset++) { void __iomem *p = rom_base + this_offset; if (readb(p + 0) != 0x90 || readb(p + 1) != 0x00 || readb(p + 2) != 0x09 || readb(p + 3) != 0x4e || readb(p + 4) != 0x41 || readb(p + 5) != 0x06) continue; this_offset += 6; p += 6; if (index == 0) { for (int i = 0; i < 6; i++) dev_addr[i] = readb(p + i); return 1; } index--; } return 0; } static void __maybe_unused get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr) { void __iomem *p; size_t size; p = pci_map_rom(pdev, &size); if (p) { int index = 0; int found; if (is_quattro_p(pdev)) index = PCI_SLOT(pdev->devfn); found = readb(p) == 0x55 && readb(p + 1) == 0xaa && find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr); pci_unmap_rom(pdev, p); if (found) return; } /* Sun MAC prefix then 3 random bytes. */ dev_addr[0] = 0x08; dev_addr[1] = 0x00; dev_addr[2] = 0x20; get_random_bytes(&dev_addr[3], 3); } #endif static void happy_meal_addr_init(struct happy_meal *hp, struct device_node *dp, int qfe_slot) { int i; for (i = 0; i < 6; i++) { if (macaddr[i] != 0) break; } if (i < 6) { /* a mac address was given */ u8 addr[ETH_ALEN]; for (i = 0; i < 6; i++) addr[i] = macaddr[i]; eth_hw_addr_set(hp->dev, addr); macaddr[5]++; } else { #ifdef CONFIG_SPARC const unsigned char *addr; int len; /* If user did not specify a MAC address specifically, use * the Quattro local-mac-address property... 
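 * ...falling back to the IDPROM address on SPARC, or, for PCI cards on
 * other platforms, to the VPD in the expansion ROM and finally to a
 * random address behind the Sun OUI.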
*/ if (qfe_slot != -1) { addr = of_get_property(dp, "local-mac-address", &len); if (addr && len == 6) { eth_hw_addr_set(hp->dev, addr); return; } } eth_hw_addr_set(hp->dev, idprom->id_ethaddr); #else u8 addr[ETH_ALEN]; get_hme_mac_nonsparc(hp->happy_dev, addr); eth_hw_addr_set(hp->dev, addr); #endif } } static int happy_meal_common_probe(struct happy_meal *hp, struct device_node *dp) { struct net_device *dev = hp->dev; int err; #ifdef CONFIG_SPARC hp->hm_revision = of_getintprop_default(dp, "hm-rev", hp->hm_revision); #endif /* Now enable the feature flags we can. */ if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21) hp->happy_flags |= HFLAG_20_21; else if (hp->hm_revision != 0xa0) hp->happy_flags |= HFLAG_NOT_A0; hp->happy_block = dmam_alloc_coherent(hp->dma_dev, PAGE_SIZE, &hp->hblock_dvma, GFP_KERNEL); if (!hp->happy_block) return -ENOMEM; /* Force check of the link first time we are brought up. */ hp->linkcheck = 0; /* Force timer state to 'asleep' with count of zero. */ hp->timer_state = asleep; hp->timer_ticks = 0; timer_setup(&hp->happy_timer, happy_meal_timer, 0); dev->netdev_ops = &hme_netdev_ops; dev->watchdog_timeo = 5 * HZ; dev->ethtool_ops = &hme_ethtool_ops; /* Happy Meal can do it all... */ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; dev->features |= dev->hw_features | NETIF_F_RXCSUM; /* Grrr, Happy Meal comes up by default not advertising * full duplex 100baseT capabilities, fix this. */ spin_lock_irq(&hp->happy_lock); happy_meal_set_initial_advertisement(hp); spin_unlock_irq(&hp->happy_lock); err = devm_register_netdev(hp->dma_dev, dev); if (err) dev_err(hp->dma_dev, "Cannot register net device, aborting.\n"); return err; } #ifdef CONFIG_SBUS static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe) { struct device_node *dp = op->dev.of_node, *sbus_dp; struct quattro *qp = NULL; struct happy_meal *hp; struct net_device *dev; int qfe_slot = -1; int err; sbus_dp = op->dev.parent->of_node; /* We can match PCI devices too, do not accept those here. 
*/ if (!of_node_name_eq(sbus_dp, "sbus") && !of_node_name_eq(sbus_dp, "sbi")) return -ENODEV; if (is_qfe) { qp = quattro_sbus_find(op); if (qp == NULL) return -ENODEV; for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) if (qp->happy_meals[qfe_slot] == NULL) break; if (qfe_slot == 4) return -ENODEV; } dev = devm_alloc_etherdev(&op->dev, sizeof(struct happy_meal)); if (!dev) return -ENOMEM; SET_NETDEV_DEV(dev, &op->dev); hp = netdev_priv(dev); hp->dev = dev; hp->happy_dev = op; hp->dma_dev = &op->dev; happy_meal_addr_init(hp, dp, qfe_slot); spin_lock_init(&hp->happy_lock); if (qp != NULL) { hp->qfe_parent = qp; hp->qfe_ent = qfe_slot; qp->happy_meals[qfe_slot] = dev; } hp->gregs = devm_platform_ioremap_resource(op, 0); if (IS_ERR(hp->gregs)) { dev_err(&op->dev, "Cannot map global registers.\n"); err = PTR_ERR(hp->gregs); goto err_out_clear_quattro; } hp->etxregs = devm_platform_ioremap_resource(op, 1); if (IS_ERR(hp->etxregs)) { dev_err(&op->dev, "Cannot map MAC TX registers.\n"); err = PTR_ERR(hp->etxregs); goto err_out_clear_quattro; } hp->erxregs = devm_platform_ioremap_resource(op, 2); if (IS_ERR(hp->erxregs)) { dev_err(&op->dev, "Cannot map MAC RX registers.\n"); err = PTR_ERR(hp->erxregs); goto err_out_clear_quattro; } hp->bigmacregs = devm_platform_ioremap_resource(op, 3); if (IS_ERR(hp->bigmacregs)) { dev_err(&op->dev, "Cannot map BIGMAC registers.\n"); err = PTR_ERR(hp->bigmacregs); goto err_out_clear_quattro; } hp->tcvregs = devm_platform_ioremap_resource(op, 4); if (IS_ERR(hp->tcvregs)) { dev_err(&op->dev, "Cannot map TCVR registers.\n"); err = PTR_ERR(hp->tcvregs); goto err_out_clear_quattro; } hp->hm_revision = 0xa0; if (qp != NULL) hp->happy_flags |= HFLAG_QUATTRO; hp->irq = op->archdata.irqs[0]; /* Get the supported DVMA burst sizes from our Happy SBUS. */ hp->happy_bursts = of_getintprop_default(sbus_dp, "burst-sizes", 0x00); #ifdef CONFIG_PCI /* Hook up SBUS register/descriptor accessors. */ hp->read_desc32 = sbus_hme_read_desc32; hp->write_txd = sbus_hme_write_txd; hp->write_rxd = sbus_hme_write_rxd; hp->read32 = sbus_hme_read32; hp->write32 = sbus_hme_write32; #endif err = happy_meal_common_probe(hp, dp); if (err) goto err_out_clear_quattro; platform_set_drvdata(op, hp); if (qfe_slot != -1) netdev_info(dev, "Quattro HME slot %d (SBUS) 10/100baseT Ethernet %pM\n", qfe_slot, dev->dev_addr); else netdev_info(dev, "HAPPY MEAL (SBUS) 10/100baseT Ethernet %pM\n", dev->dev_addr); return 0; err_out_clear_quattro: if (qp) qp->happy_meals[qfe_slot] = NULL; return err; } #endif #ifdef CONFIG_PCI static int happy_meal_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device_node *dp = NULL; struct quattro *qp = NULL; struct happy_meal *hp; struct net_device *dev; void __iomem *hpreg_base; struct resource *hpreg_res; char prom_name[64]; int qfe_slot = -1; int err = -ENODEV; /* Now make sure pci_dev cookie is there. 
*/ #ifdef CONFIG_SPARC dp = pci_device_to_OF_node(pdev); snprintf(prom_name, sizeof(prom_name), "%pOFn", dp); #else if (is_quattro_p(pdev)) strcpy(prom_name, "SUNW,qfe"); else strcpy(prom_name, "SUNW,hme"); #endif err = pcim_enable_device(pdev); if (err) return err; pci_set_master(pdev); if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) { qp = quattro_pci_find(pdev); if (IS_ERR(qp)) return PTR_ERR(qp); for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) if (!qp->happy_meals[qfe_slot]) break; if (qfe_slot == 4) return -ENODEV; } dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct happy_meal)); if (!dev) return -ENOMEM; SET_NETDEV_DEV(dev, &pdev->dev); hp = netdev_priv(dev); hp->dev = dev; hp->happy_dev = pdev; hp->dma_dev = &pdev->dev; spin_lock_init(&hp->happy_lock); if (qp != NULL) { hp->qfe_parent = qp; hp->qfe_ent = qfe_slot; qp->happy_meals[qfe_slot] = dev; } err = -EINVAL; if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { dev_err(&pdev->dev, "Cannot find proper PCI device base address.\n"); goto err_out_clear_quattro; } hpreg_res = devm_request_mem_region(&pdev->dev, pci_resource_start(pdev, 0), pci_resource_len(pdev, 0), DRV_NAME); if (!hpreg_res) { err = -EBUSY; dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n"); goto err_out_clear_quattro; } hpreg_base = pcim_iomap(pdev, 0, 0x8000); if (!hpreg_base) { err = -ENOMEM; dev_err(&pdev->dev, "Unable to remap card memory.\n"); goto err_out_clear_quattro; } happy_meal_addr_init(hp, dp, qfe_slot); /* Layout registers. */ hp->gregs = (hpreg_base + 0x0000UL); hp->etxregs = (hpreg_base + 0x2000UL); hp->erxregs = (hpreg_base + 0x4000UL); hp->bigmacregs = (hpreg_base + 0x6000UL); hp->tcvregs = (hpreg_base + 0x7000UL); if (IS_ENABLED(CONFIG_SPARC)) hp->hm_revision = 0xc0 | (pdev->revision & 0x0f); else hp->hm_revision = 0x20; if (qp != NULL) hp->happy_flags |= HFLAG_QUATTRO; /* And of course, indicate this is PCI. */ hp->happy_flags |= HFLAG_PCI; #ifdef CONFIG_SPARC /* Assume PCI happy meals can handle all burst sizes. */ hp->happy_bursts = DMA_BURSTBITS; #endif hp->irq = pdev->irq; #ifdef CONFIG_SBUS /* Hook up PCI register/descriptor accessors. 
*/ hp->read_desc32 = pci_hme_read_desc32; hp->write_txd = pci_hme_write_txd; hp->write_rxd = pci_hme_write_rxd; hp->read32 = pci_hme_read32; hp->write32 = pci_hme_write32; #endif err = happy_meal_common_probe(hp, dp); if (err) goto err_out_clear_quattro; pci_set_drvdata(pdev, hp); if (!qfe_slot) { struct pci_dev *qpdev = qp->quattro_dev; prom_name[0] = 0; if (!strncmp(dev->name, "eth", 3)) { int i = simple_strtoul(dev->name + 3, NULL, 10); sprintf(prom_name, "-%d", i + 3); } netdev_info(dev, "%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet bridge %04x.%04x\n", prom_name, qpdev->vendor, qpdev->device); } if (qfe_slot != -1) netdev_info(dev, "Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet %pM\n", qfe_slot, dev->dev_addr); else netdev_info(dev, "HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet %pM\n", dev->dev_addr); return 0; err_out_clear_quattro: if (qp != NULL) qp->happy_meals[qfe_slot] = NULL; return err; } static const struct pci_device_id happymeal_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, happymeal_pci_ids); static struct pci_driver hme_pci_driver = { .name = "hme", .id_table = happymeal_pci_ids, .probe = happy_meal_pci_probe, }; static int __init happy_meal_pci_init(void) { return pci_register_driver(&hme_pci_driver); } static void happy_meal_pci_exit(void) { pci_unregister_driver(&hme_pci_driver); while (qfe_pci_list) { struct quattro *qfe = qfe_pci_list; struct quattro *next = qfe->next; kfree(qfe); qfe_pci_list = next; } } #endif #ifdef CONFIG_SBUS static const struct of_device_id hme_sbus_match[]; static int hme_sbus_probe(struct platform_device *op) { const struct of_device_id *match; struct device_node *dp = op->dev.of_node; const char *model = of_get_property(dp, "model", NULL); int is_qfe; match = of_match_device(hme_sbus_match, &op->dev); if (!match) return -EINVAL; is_qfe = (match->data != NULL); if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) is_qfe = 1; return happy_meal_sbus_probe_one(op, is_qfe); } static const struct of_device_id hme_sbus_match[] = { { .name = "SUNW,hme", }, { .name = "SUNW,qfe", .data = (void *) 1, }, { .name = "qfe", .data = (void *) 1, }, {}, }; MODULE_DEVICE_TABLE(of, hme_sbus_match); static struct platform_driver hme_sbus_driver = { .driver = { .name = "hme", .of_match_table = hme_sbus_match, }, .probe = hme_sbus_probe, }; static int __init happy_meal_sbus_init(void) { return platform_driver_register(&hme_sbus_driver); } static void happy_meal_sbus_exit(void) { platform_driver_unregister(&hme_sbus_driver); while (qfe_sbus_list) { struct quattro *qfe = qfe_sbus_list; struct quattro *next = qfe->next; kfree(qfe); qfe_sbus_list = next; } } #endif static int __init happy_meal_probe(void) { int err = 0; #ifdef CONFIG_SBUS err = happy_meal_sbus_init(); #endif #ifdef CONFIG_PCI if (!err) { err = happy_meal_pci_init(); #ifdef CONFIG_SBUS if (err) happy_meal_sbus_exit(); #endif } #endif return err; } static void __exit happy_meal_exit(void) { #ifdef CONFIG_SBUS happy_meal_sbus_exit(); #endif #ifdef CONFIG_PCI happy_meal_pci_exit(); #endif } module_init(happy_meal_probe); module_exit(happy_meal_exit);
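/*
 * Editorial sketch, not part of the driver: a standalone user-space
 * model of the BigMAC multicast hash that happy_meal_init() and
 * happy_meal_set_multicast() program above.  It assumes ether_crc_le()
 * behaves as the usual little-endian CRC-32 (polynomial 0xedb88320,
 * initial value ~0, no final inversion) over the 6-byte MAC address;
 * the top six bits of that CRC pick one bit out of the four 16-bit
 * BMAC_HTABLE registers.  The MAC address used here is a hypothetical
 * example.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_le_sketch(const uint8_t *p, int len)
{
	uint32_t crc = ~0u;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1u));
	}
	return crc;	/* no final inversion, mirroring ether_crc_le() */
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc = crc32_le_sketch(mac, 6) >> 26;	/* keep the top 6 bits */

	/* Same mapping as the driver: word = crc >> 4, bit = crc & 0xf.
	 * The hash is an imperfect filter, so several multicast addresses
	 * can land on the same bit.
	 */
	printf("BMAC_HTABLE%u |= 1 << %u\n",
	       (unsigned)(crc >> 4), (unsigned)(crc & 0xfu));
	return 0;
}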
linux-master
drivers/net/ethernet/sun/sunhme.c
// SPDX-License-Identifier: GPL-2.0 /* niu.c: Neptune ethernet driver. * * Copyright (C) 2007, 2008 David S. Miller ([email protected]) */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/bitops.h> #include <linux/mii.h> #include <linux/if.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/ip.h> #include <linux/in.h> #include <linux/ipv6.h> #include <linux/log2.h> #include <linux/jiffies.h> #include <linux/crc32.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/of.h> #include "niu.h" /* This driver wants to store a link to a "next page" within the * page struct itself by overloading the content of the "mapping" * member. This is not expected by the page API, but does currently * work. However, the randstruct plugin gets very bothered by this * case because "mapping" (struct address_space) is randomized, so * casts to/from it trigger warnings. Hide this by way of a union, * to create a typed alias of "mapping", since that's how it is * actually being used here. */ union niu_page { struct page page; struct { unsigned long __flags; /* unused alias of "flags" */ struct list_head __lru; /* unused alias of "lru" */ struct page *next; /* alias of "mapping" */ }; }; #define niu_next_page(p) container_of(p, union niu_page, page)->next #define DRV_MODULE_NAME "niu" #define DRV_MODULE_VERSION "1.1" #define DRV_MODULE_RELDATE "Apr 22, 2010" static char version[] = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("David S. 
Miller ([email protected])"); MODULE_DESCRIPTION("NIU ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); #ifndef readq static u64 readq(void __iomem *reg) { return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32); } static void writeq(u64 val, void __iomem *reg) { writel(val & 0xffffffff, reg); writel(val >> 32, reg + 0x4UL); } #endif static const struct pci_device_id niu_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)}, {} }; MODULE_DEVICE_TABLE(pci, niu_pci_tbl); #define NIU_TX_TIMEOUT (5 * HZ) #define nr64(reg) readq(np->regs + (reg)) #define nw64(reg, val) writeq((val), np->regs + (reg)) #define nr64_mac(reg) readq(np->mac_regs + (reg)) #define nw64_mac(reg, val) writeq((val), np->mac_regs + (reg)) #define nr64_ipp(reg) readq(np->regs + np->ipp_off + (reg)) #define nw64_ipp(reg, val) writeq((val), np->regs + np->ipp_off + (reg)) #define nr64_pcs(reg) readq(np->regs + np->pcs_off + (reg)) #define nw64_pcs(reg, val) writeq((val), np->regs + np->pcs_off + (reg)) #define nr64_xpcs(reg) readq(np->regs + np->xpcs_off + (reg)) #define nw64_xpcs(reg, val) writeq((val), np->regs + np->xpcs_off + (reg)) #define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) static int niu_debug; static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "NIU debug level"); #define niu_lock_parent(np, flags) \ spin_lock_irqsave(&np->parent->lock, flags) #define niu_unlock_parent(np, flags) \ spin_unlock_irqrestore(&np->parent->lock, flags) static int serdes_init_10g_serdes(struct niu *np); static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg, u64 bits, int limit, int delay) { while (--limit >= 0) { u64 val = nr64_mac(reg); if (!(val & bits)) break; udelay(delay); } if (limit < 0) return -ENODEV; return 0; } static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg, u64 bits, int limit, int delay, const char *reg_name) { int err; nw64_mac(reg, bits); err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay); if (err) netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", (unsigned long long)bits, reg_name, (unsigned long long)nr64_mac(reg)); return err; } #define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ __niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ }) static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg, u64 bits, int limit, int delay) { while (--limit >= 0) { u64 val = nr64_ipp(reg); if (!(val & bits)) break; udelay(delay); } if (limit < 0) return -ENODEV; return 0; } static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg, u64 bits, int limit, int delay, const char *reg_name) { int err; u64 val; val = nr64_ipp(reg); val |= bits; nw64_ipp(reg, val); err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay); if (err) netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", (unsigned long long)bits, reg_name, (unsigned long long)nr64_ipp(reg)); return err; } #define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ __niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ }) static int __niu_wait_bits_clear(struct niu *np, unsigned long reg, u64 bits, int limit, int delay) { while (--limit >= 0) { u64 val = nr64(reg); if (!(val & bits)) break; udelay(delay); } if (limit < 0) return -ENODEV; return 0; } #define niu_wait_bits_clear(NP, REG, BITS, LIMIT, 
DELAY) \ ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ __niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \ }) static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg, u64 bits, int limit, int delay, const char *reg_name) { int err; nw64(reg, bits); err = __niu_wait_bits_clear(np, reg, bits, limit, delay); if (err) netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", (unsigned long long)bits, reg_name, (unsigned long long)nr64(reg)); return err; } #define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ __niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ }) static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on) { u64 val = (u64) lp->timer; if (on) val |= LDG_IMGMT_ARM; nw64(LDG_IMGMT(lp->ldg_num), val); } static int niu_ldn_irq_enable(struct niu *np, int ldn, int on) { unsigned long mask_reg, bits; u64 val; if (ldn < 0 || ldn > LDN_MAX) return -EINVAL; if (ldn < 64) { mask_reg = LD_IM0(ldn); bits = LD_IM0_MASK; } else { mask_reg = LD_IM1(ldn - 64); bits = LD_IM1_MASK; } val = nr64(mask_reg); if (on) val &= ~bits; else val |= bits; nw64(mask_reg, val); return 0; } static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on) { struct niu_parent *parent = np->parent; int i; for (i = 0; i <= LDN_MAX; i++) { int err; if (parent->ldg_map[i] != lp->ldg_num) continue; err = niu_ldn_irq_enable(np, i, on); if (err) return err; } return 0; } static int niu_enable_interrupts(struct niu *np, int on) { int i; for (i = 0; i < np->num_ldg; i++) { struct niu_ldg *lp = &np->ldg[i]; int err; err = niu_enable_ldn_in_ldg(np, lp, on); if (err) return err; } for (i = 0; i < np->num_ldg; i++) niu_ldg_rearm(np, &np->ldg[i], on); return 0; } static u32 phy_encode(u32 type, int port) { return type << (port * 2); } static u32 phy_decode(u32 val, int port) { return (val >> (port * 2)) & PORT_TYPE_MASK; } static int mdio_wait(struct niu *np) { int limit = 1000; u64 val; while (--limit > 0) { val = nr64(MIF_FRAME_OUTPUT); if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1) return val & MIF_FRAME_OUTPUT_DATA; udelay(10); } return -ENODEV; } static int mdio_read(struct niu *np, int port, int dev, int reg) { int err; nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); err = mdio_wait(np); if (err < 0) return err; nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev)); return mdio_wait(np); } static int mdio_write(struct niu *np, int port, int dev, int reg, int data) { int err; nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); err = mdio_wait(np); if (err < 0) return err; nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data)); err = mdio_wait(np); if (err < 0) return err; return 0; } static int mii_read(struct niu *np, int port, int reg) { nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg)); return mdio_wait(np); } static int mii_write(struct niu *np, int port, int reg, int data) { int err; nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data)); err = mdio_wait(np); if (err < 0) return err; return 0; } static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val) { int err; err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_TX_CFG_L(channel), val & 0xffff); if (!err) err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_TX_CFG_H(channel), val >> 16); return err; } static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val) { int err; err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_RX_CFG_L(channel), val & 0xffff); if (!err) err = 
mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_RX_CFG_H(channel), val >> 16); return err; } /* Mode is always 10G fiber. */ static int serdes_init_niu_10g_fiber(struct niu *np) { struct niu_link_config *lp = &np->link_config; u32 tx_cfg, rx_cfg; unsigned long i; tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | PLL_RX_CFG_EQ_LP_ADAPTIVE); if (lp->loopback_mode == LOOPBACK_PHY) { u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_TEST_CFG_L, test_cfg); tx_cfg |= PLL_TX_CFG_ENTEST; rx_cfg |= PLL_RX_CFG_ENTEST; } /* Initialize all 4 lanes of the SERDES. */ for (i = 0; i < 4; i++) { int err = esr2_set_tx_cfg(np, i, tx_cfg); if (err) return err; } for (i = 0; i < 4; i++) { int err = esr2_set_rx_cfg(np, i, rx_cfg); if (err) return err; } return 0; } static int serdes_init_niu_1g_serdes(struct niu *np) { struct niu_link_config *lp = &np->link_config; u16 pll_cfg, pll_sts; int max_retry = 100; u64 sig, mask, val; u32 tx_cfg, rx_cfg; unsigned long i; int err; tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV | PLL_TX_CFG_RATE_HALF); rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | PLL_RX_CFG_RATE_HALF); if (np->port == 0) rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE; if (lp->loopback_mode == LOOPBACK_PHY) { u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_TEST_CFG_L, test_cfg); tx_cfg |= PLL_TX_CFG_ENTEST; rx_cfg |= PLL_RX_CFG_ENTEST; } /* Initialize PLL for 1G */ pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X); err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_CFG_L, pll_cfg); if (err) { netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", np->port, __func__); return err; } pll_sts = PLL_CFG_ENPLL; err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_STS_L, pll_sts); if (err) { netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", np->port, __func__); return err; } udelay(200); /* Initialize all 4 lanes of the SERDES. 
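 * Every lane is programmed with the same tx_cfg/rx_cfg value;
 * esr2_set_tx_cfg() and esr2_set_rx_cfg() split the 32-bit value
 * across the _L and _H MDIO registers of the ESR2 device.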
*/ for (i = 0; i < 4; i++) { err = esr2_set_tx_cfg(np, i, tx_cfg); if (err) return err; } for (i = 0; i < 4; i++) { err = esr2_set_rx_cfg(np, i, rx_cfg); if (err) return err; } switch (np->port) { case 0: val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); mask = val; break; case 1: val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); mask = val; break; default: return -EINVAL; } while (max_retry--) { sig = nr64(ESR_INT_SIGNALS); if ((sig & mask) == val) break; mdelay(500); } if ((sig & mask) != val) { netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", np->port, (int)(sig & mask), (int)val); return -ENODEV; } return 0; } static int serdes_init_niu_10g_serdes(struct niu *np) { struct niu_link_config *lp = &np->link_config; u32 tx_cfg, rx_cfg, pll_cfg, pll_sts; int max_retry = 100; u64 sig, mask, val; unsigned long i; int err; tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | PLL_RX_CFG_EQ_LP_ADAPTIVE); if (lp->loopback_mode == LOOPBACK_PHY) { u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_TEST_CFG_L, test_cfg); tx_cfg |= PLL_TX_CFG_ENTEST; rx_cfg |= PLL_RX_CFG_ENTEST; } /* Initialize PLL for 10G */ pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X); err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff); if (err) { netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", np->port, __func__); return err; } pll_sts = PLL_CFG_ENPLL; err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_STS_L, pll_sts & 0xffff); if (err) { netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", np->port, __func__); return err; } udelay(200); /* Initialize all 4 lanes of the SERDES. 
*/ for (i = 0; i < 4; i++) { err = esr2_set_tx_cfg(np, i, tx_cfg); if (err) return err; } for (i = 0; i < 4; i++) { err = esr2_set_rx_cfg(np, i, rx_cfg); if (err) return err; } /* check if serdes is ready */ switch (np->port) { case 0: mask = ESR_INT_SIGNALS_P0_BITS; val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0 | ESR_INT_XSRDY_P0 | ESR_INT_XDP_P0_CH3 | ESR_INT_XDP_P0_CH2 | ESR_INT_XDP_P0_CH1 | ESR_INT_XDP_P0_CH0); break; case 1: mask = ESR_INT_SIGNALS_P1_BITS; val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1 | ESR_INT_XSRDY_P1 | ESR_INT_XDP_P1_CH3 | ESR_INT_XDP_P1_CH2 | ESR_INT_XDP_P1_CH1 | ESR_INT_XDP_P1_CH0); break; default: return -EINVAL; } while (max_retry--) { sig = nr64(ESR_INT_SIGNALS); if ((sig & mask) == val) break; mdelay(500); } if ((sig & mask) != val) { pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n", np->port, (int)(sig & mask), (int)val); /* 10G failed, try initializing at 1G */ err = serdes_init_niu_1g_serdes(np); if (!err) { np->flags &= ~NIU_FLAGS_10G; np->mac_xcvr = MAC_XCVR_PCS; } else { netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", np->port); return -ENODEV; } } return 0; } static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val) { int err; err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan)); if (err >= 0) { *val = (err & 0xffff); err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_H(chan)); if (err >= 0) *val |= ((err & 0xffff) << 16); err = 0; } return err; } static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val) { int err; err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_GLUE_CTRL0_L(chan)); if (err >= 0) { *val = (err & 0xffff); err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_GLUE_CTRL0_H(chan)); if (err >= 0) { *val |= ((err & 0xffff) << 16); err = 0; } } return err; } static int esr_read_reset(struct niu *np, u32 *val) { int err; err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_L); if (err >= 0) { *val = (err & 0xffff); err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_H); if (err >= 0) { *val |= ((err & 0xffff) << 16); err = 0; } } return err; } static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val) { int err; err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan), val & 0xffff); if (!err) err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_H(chan), (val >> 16)); return err; } static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val) { int err; err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_GLUE_CTRL0_L(chan), val & 0xffff); if (!err) err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_GLUE_CTRL0_H(chan), (val >> 16)); return err; } static int esr_reset(struct niu *np) { u32 reset; int err; err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_L, 0x0000); if (err) return err; err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_H, 0xffff); if (err) return err; udelay(200); err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_L, 0xffff); if (err) return err; udelay(200); err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_H, 0x0000); if (err) return err; udelay(200); err = esr_read_reset(np, &reset); if (err) return err; if (reset != 0) { netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n", np->port, reset); return -ENODEV; } return 0; } static int serdes_init_10g(struct niu *np) { struct niu_link_config *lp = &np->link_config; unsigned long ctrl_reg, test_cfg_reg, i; u64 ctrl_val, 
test_cfg_val, sig, mask, val; int err; switch (np->port) { case 0: ctrl_reg = ENET_SERDES_0_CTRL_CFG; test_cfg_reg = ENET_SERDES_0_TEST_CFG; break; case 1: ctrl_reg = ENET_SERDES_1_CTRL_CFG; test_cfg_reg = ENET_SERDES_1_TEST_CFG; break; default: return -EINVAL; } ctrl_val = (ENET_SERDES_CTRL_SDET_0 | ENET_SERDES_CTRL_SDET_1 | ENET_SERDES_CTRL_SDET_2 | ENET_SERDES_CTRL_SDET_3 | (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); test_cfg_val = 0; if (lp->loopback_mode == LOOPBACK_PHY) { test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_0_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_1_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_2_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_3_SHIFT)); } nw64(ctrl_reg, ctrl_val); nw64(test_cfg_reg, test_cfg_val); /* Initialize all 4 lanes of the SERDES. */ for (i = 0; i < 4; i++) { u32 rxtx_ctrl, glue0; err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); if (err) return err; err = esr_read_glue0(np, i, &glue0); if (err) return err; rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); glue0 &= ~(ESR_GLUE_CTRL0_SRATE | ESR_GLUE_CTRL0_THCNT | ESR_GLUE_CTRL0_BLTIME); glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | (BLTIME_300_CYCLES << ESR_GLUE_CTRL0_BLTIME_SHIFT)); err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); if (err) return err; err = esr_write_glue0(np, i, glue0); if (err) return err; } err = esr_reset(np); if (err) return err; sig = nr64(ESR_INT_SIGNALS); switch (np->port) { case 0: mask = ESR_INT_SIGNALS_P0_BITS; val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0 | ESR_INT_XSRDY_P0 | ESR_INT_XDP_P0_CH3 | ESR_INT_XDP_P0_CH2 | ESR_INT_XDP_P0_CH1 | ESR_INT_XDP_P0_CH0); break; case 1: mask = ESR_INT_SIGNALS_P1_BITS; val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1 | ESR_INT_XSRDY_P1 | ESR_INT_XDP_P1_CH3 | ESR_INT_XDP_P1_CH2 | ESR_INT_XDP_P1_CH1 | ESR_INT_XDP_P1_CH0); break; default: return -EINVAL; } if ((sig & mask) != val) { if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; return 0; } netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", np->port, (int)(sig & mask), (int)val); return -ENODEV; } if (np->flags & NIU_FLAGS_HOTPLUG_PHY) np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; return 0; } static int serdes_init_1g(struct niu *np) { u64 val; val = nr64(ENET_SERDES_1_PLL_CFG); val &= ~ENET_SERDES_PLL_FBDIV2; switch (np->port) { case 0: val |= ENET_SERDES_PLL_HRATE0; break; case 1: val |= ENET_SERDES_PLL_HRATE1; break; case 2: val |= ENET_SERDES_PLL_HRATE2; break; case 3: val |= ENET_SERDES_PLL_HRATE3; break; default: return -EINVAL; } nw64(ENET_SERDES_1_PLL_CFG, val); return 0; } static int serdes_init_1g_serdes(struct niu *np) { struct niu_link_config *lp = &np->link_config; unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; u64 ctrl_val, test_cfg_val, sig, mask, val; int err; u64 reset_val, val_rd; val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 | ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 | ENET_SERDES_PLL_FBDIV0; switch (np->port) { case 0: reset_val = ENET_SERDES_RESET_0; ctrl_reg = ENET_SERDES_0_CTRL_CFG; test_cfg_reg = ENET_SERDES_0_TEST_CFG; pll_cfg = 
ENET_SERDES_0_PLL_CFG; break; case 1: reset_val = ENET_SERDES_RESET_1; ctrl_reg = ENET_SERDES_1_CTRL_CFG; test_cfg_reg = ENET_SERDES_1_TEST_CFG; pll_cfg = ENET_SERDES_1_PLL_CFG; break; default: return -EINVAL; } ctrl_val = (ENET_SERDES_CTRL_SDET_0 | ENET_SERDES_CTRL_SDET_1 | ENET_SERDES_CTRL_SDET_2 | ENET_SERDES_CTRL_SDET_3 | (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); test_cfg_val = 0; if (lp->loopback_mode == LOOPBACK_PHY) { test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_0_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_1_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_2_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_3_SHIFT)); } nw64(ENET_SERDES_RESET, reset_val); mdelay(20); val_rd = nr64(ENET_SERDES_RESET); val_rd &= ~reset_val; nw64(pll_cfg, val); nw64(ctrl_reg, ctrl_val); nw64(test_cfg_reg, test_cfg_val); nw64(ENET_SERDES_RESET, val_rd); mdelay(2000); /* Initialize all 4 lanes of the SERDES. */ for (i = 0; i < 4; i++) { u32 rxtx_ctrl, glue0; err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); if (err) return err; err = esr_read_glue0(np, i, &glue0); if (err) return err; rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); glue0 &= ~(ESR_GLUE_CTRL0_SRATE | ESR_GLUE_CTRL0_THCNT | ESR_GLUE_CTRL0_BLTIME); glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | (BLTIME_300_CYCLES << ESR_GLUE_CTRL0_BLTIME_SHIFT)); err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); if (err) return err; err = esr_write_glue0(np, i, glue0); if (err) return err; } sig = nr64(ESR_INT_SIGNALS); switch (np->port) { case 0: val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); mask = val; break; case 1: val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); mask = val; break; default: return -EINVAL; } if ((sig & mask) != val) { netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", np->port, (int)(sig & mask), (int)val); return -ENODEV; } return 0; } static int link_status_1g_serdes(struct niu *np, int *link_up_p) { struct niu_link_config *lp = &np->link_config; int link_up; u64 val; u16 current_speed; unsigned long flags; u8 current_duplex; link_up = 0; current_speed = SPEED_INVALID; current_duplex = DUPLEX_INVALID; spin_lock_irqsave(&np->lock, flags); val = nr64_pcs(PCS_MII_STAT); if (val & PCS_MII_STAT_LINK_STATUS) { link_up = 1; current_speed = SPEED_1000; current_duplex = DUPLEX_FULL; } lp->active_speed = current_speed; lp->active_duplex = current_duplex; spin_unlock_irqrestore(&np->lock, flags); *link_up_p = link_up; return 0; } static int link_status_10g_serdes(struct niu *np, int *link_up_p) { unsigned long flags; struct niu_link_config *lp = &np->link_config; int link_up = 0; int link_ok = 1; u64 val, val2; u16 current_speed; u8 current_duplex; if (!(np->flags & NIU_FLAGS_10G)) return link_status_1g_serdes(np, link_up_p); current_speed = SPEED_INVALID; current_duplex = DUPLEX_INVALID; spin_lock_irqsave(&np->lock, flags); val = nr64_xpcs(XPCS_STATUS(0)); val2 = nr64_mac(XMAC_INTER2); if (val2 & 0x01000000) link_ok = 0; if ((val & 0x1000ULL) && link_ok) { link_up = 1; current_speed = SPEED_10000; current_duplex = DUPLEX_FULL; } lp->active_speed = current_speed; lp->active_duplex 
= current_duplex; spin_unlock_irqrestore(&np->lock, flags); *link_up_p = link_up; return 0; } static int link_status_mii(struct niu *np, int *link_up_p) { struct niu_link_config *lp = &np->link_config; int err; int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus; int supported, advertising, active_speed, active_duplex; err = mii_read(np, np->phy_addr, MII_BMCR); if (unlikely(err < 0)) return err; bmcr = err; err = mii_read(np, np->phy_addr, MII_BMSR); if (unlikely(err < 0)) return err; bmsr = err; err = mii_read(np, np->phy_addr, MII_ADVERTISE); if (unlikely(err < 0)) return err; advert = err; err = mii_read(np, np->phy_addr, MII_LPA); if (unlikely(err < 0)) return err; lpa = err; if (likely(bmsr & BMSR_ESTATEN)) { err = mii_read(np, np->phy_addr, MII_ESTATUS); if (unlikely(err < 0)) return err; estatus = err; err = mii_read(np, np->phy_addr, MII_CTRL1000); if (unlikely(err < 0)) return err; ctrl1000 = err; err = mii_read(np, np->phy_addr, MII_STAT1000); if (unlikely(err < 0)) return err; stat1000 = err; } else estatus = ctrl1000 = stat1000 = 0; supported = 0; if (bmsr & BMSR_ANEGCAPABLE) supported |= SUPPORTED_Autoneg; if (bmsr & BMSR_10HALF) supported |= SUPPORTED_10baseT_Half; if (bmsr & BMSR_10FULL) supported |= SUPPORTED_10baseT_Full; if (bmsr & BMSR_100HALF) supported |= SUPPORTED_100baseT_Half; if (bmsr & BMSR_100FULL) supported |= SUPPORTED_100baseT_Full; if (estatus & ESTATUS_1000_THALF) supported |= SUPPORTED_1000baseT_Half; if (estatus & ESTATUS_1000_TFULL) supported |= SUPPORTED_1000baseT_Full; lp->supported = supported; advertising = mii_adv_to_ethtool_adv_t(advert); advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000); if (bmcr & BMCR_ANENABLE) { int neg, neg1000; lp->active_autoneg = 1; advertising |= ADVERTISED_Autoneg; neg = advert & lpa; neg1000 = (ctrl1000 << 2) & stat1000; if (neg1000 & (LPA_1000FULL | LPA_1000HALF)) active_speed = SPEED_1000; else if (neg & LPA_100) active_speed = SPEED_100; else if (neg & (LPA_10HALF | LPA_10FULL)) active_speed = SPEED_10; else active_speed = SPEED_INVALID; if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX)) active_duplex = DUPLEX_FULL; else if (active_speed != SPEED_INVALID) active_duplex = DUPLEX_HALF; else active_duplex = DUPLEX_INVALID; } else { lp->active_autoneg = 0; if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100)) active_speed = SPEED_1000; else if (bmcr & BMCR_SPEED100) active_speed = SPEED_100; else active_speed = SPEED_10; if (bmcr & BMCR_FULLDPLX) active_duplex = DUPLEX_FULL; else active_duplex = DUPLEX_HALF; } lp->active_advertising = advertising; lp->active_speed = active_speed; lp->active_duplex = active_duplex; *link_up_p = !!(bmsr & BMSR_LSTATUS); return 0; } static int link_status_1g_rgmii(struct niu *np, int *link_up_p) { struct niu_link_config *lp = &np->link_config; u16 current_speed, bmsr; unsigned long flags; u8 current_duplex; int err, link_up; link_up = 0; current_speed = SPEED_INVALID; current_duplex = DUPLEX_INVALID; spin_lock_irqsave(&np->lock, flags); err = mii_read(np, np->phy_addr, MII_BMSR); if (err < 0) goto out; bmsr = err; if (bmsr & BMSR_LSTATUS) { link_up = 1; current_speed = SPEED_1000; current_duplex = DUPLEX_FULL; } lp->active_speed = current_speed; lp->active_duplex = current_duplex; err = 0; out: spin_unlock_irqrestore(&np->lock, flags); *link_up_p = link_up; return err; } static int link_status_1g(struct niu *np, int *link_up_p) { struct niu_link_config *lp = &np->link_config; unsigned long flags; int err; spin_lock_irqsave(&np->lock, flags); err = link_status_mii(np, link_up_p); 
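	/* link_status_mii() has filled in lp->supported and
	 * lp->active_advertising; mark this 1G copper link as
	 * twisted pair before dropping the lock.
	 */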
lp->supported |= SUPPORTED_TP; lp->active_advertising |= ADVERTISED_TP; spin_unlock_irqrestore(&np->lock, flags); return err; } static int bcm8704_reset(struct niu *np) { int err, limit; err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, MII_BMCR); if (err < 0 || err == 0xffff) return err; err |= BMCR_RESET; err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, MII_BMCR, err); if (err) return err; limit = 1000; while (--limit >= 0) { err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, MII_BMCR); if (err < 0) return err; if (!(err & BMCR_RESET)) break; } if (limit < 0) { netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n", np->port, (err & 0xffff)); return -ENODEV; } return 0; } /* When written, certain PHY registers need to be read back twice * in order for the bits to settle properly. */ static int bcm8704_user_dev3_readback(struct niu *np, int reg) { int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); if (err < 0) return err; err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); if (err < 0) return err; return 0; } static int bcm8706_init_user_dev3(struct niu *np) { int err; err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, BCM8704_USER_OPT_DIGITAL_CTRL); if (err < 0) return err; err &= ~USER_ODIG_CTRL_GPIOS; err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT); err |= USER_ODIG_CTRL_RESV2; err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, BCM8704_USER_OPT_DIGITAL_CTRL, err); if (err) return err; mdelay(1000); return 0; } static int bcm8704_init_user_dev3(struct niu *np) { int err; err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL, (USER_CONTROL_OPTXRST_LVL | USER_CONTROL_OPBIASFLT_LVL | USER_CONTROL_OBTMPFLT_LVL | USER_CONTROL_OPPRFLT_LVL | USER_CONTROL_OPTXFLT_LVL | USER_CONTROL_OPRXLOS_LVL | USER_CONTROL_OPRXFLT_LVL | USER_CONTROL_OPTXON_LVL | (0x3f << USER_CONTROL_RES1_SHIFT))); if (err) return err; err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL, (USER_PMD_TX_CTL_XFP_CLKEN | (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) | (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) | USER_PMD_TX_CTL_TSCK_LPWREN)); if (err) return err; err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL); if (err) return err; err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL); if (err) return err; err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, BCM8704_USER_OPT_DIGITAL_CTRL); if (err < 0) return err; err &= ~USER_ODIG_CTRL_GPIOS; err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT); err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, BCM8704_USER_OPT_DIGITAL_CTRL, err); if (err) return err; mdelay(1000); return 0; } static int mrvl88x2011_act_led(struct niu *np, int val) { int err; err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, MRVL88X2011_LED_8_TO_11_CTL); if (err < 0) return err; err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK); err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT,val); return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, MRVL88X2011_LED_8_TO_11_CTL, err); } static int mrvl88x2011_led_blink_rate(struct niu *np, int rate) { int err; err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, MRVL88X2011_LED_BLINK_CTL); if (err >= 0) { err &= ~MRVL88X2011_LED_BLKRATE_MASK; err |= (rate << 4); err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, MRVL88X2011_LED_BLINK_CTL, err); } return err; } static int xcvr_init_10g_mrvl88x2011(struct niu *np) { int err; /* Set LED functions */ err = 
mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS); if (err) return err; /* led activity */ err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF); if (err) return err; err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, MRVL88X2011_GENERAL_CTL); if (err < 0) return err; err |= MRVL88X2011_ENA_XFPREFCLK; err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, MRVL88X2011_GENERAL_CTL, err); if (err < 0) return err; err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, MRVL88X2011_PMA_PMD_CTL_1); if (err < 0) return err; if (np->link_config.loopback_mode == LOOPBACK_MAC) err |= MRVL88X2011_LOOPBACK; else err &= ~MRVL88X2011_LOOPBACK; err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, MRVL88X2011_PMA_PMD_CTL_1, err); if (err < 0) return err; /* Enable PMD */ return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX); } static int xcvr_diag_bcm870x(struct niu *np) { u16 analog_stat0, tx_alarm_status; int err = 0; #if 1 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, MII_STAT1000); if (err < 0) return err; pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err); err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20); if (err < 0) return err; pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err); err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, MII_NWAYTEST); if (err < 0) return err; pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err); #endif /* XXX dig this out it might not be so useful XXX */ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, BCM8704_USER_ANALOG_STATUS0); if (err < 0) return err; err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, BCM8704_USER_ANALOG_STATUS0); if (err < 0) return err; analog_stat0 = err; err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, BCM8704_USER_TX_ALARM_STATUS); if (err < 0) return err; err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, BCM8704_USER_TX_ALARM_STATUS); if (err < 0) return err; tx_alarm_status = err; if (analog_stat0 != 0x03fc) { if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) { pr_info("Port %u cable not connected or bad cable\n", np->port); } else if (analog_stat0 == 0x639c) { pr_info("Port %u optical module is bad or missing\n", np->port); } } return 0; } static int xcvr_10g_set_lb_bcm870x(struct niu *np) { struct niu_link_config *lp = &np->link_config; int err; err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, MII_BMCR); if (err < 0) return err; err &= ~BMCR_LOOPBACK; if (lp->loopback_mode == LOOPBACK_MAC) err |= BMCR_LOOPBACK; err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, MII_BMCR, err); if (err) return err; return 0; } static int xcvr_init_10g_bcm8706(struct niu *np) { int err = 0; u64 val; if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) && (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0) return err; val = nr64_mac(XMAC_CONFIG); val &= ~XMAC_CONFIG_LED_POLARITY; val |= XMAC_CONFIG_FORCE_LED_ON; nw64_mac(XMAC_CONFIG, val); val = nr64(MIF_CONFIG); val |= MIF_CONFIG_INDIRECT_MODE; nw64(MIF_CONFIG, val); err = bcm8704_reset(np); if (err) return err; err = xcvr_10g_set_lb_bcm870x(np); if (err) return err; err = bcm8706_init_user_dev3(np); if (err) return err; err = xcvr_diag_bcm870x(np); if (err) return err; return 0; } static int xcvr_init_10g_bcm8704(struct niu *np) { int err; err = bcm8704_reset(np); if (err) return err; err = bcm8704_init_user_dev3(np); if (err) return err; err = xcvr_10g_set_lb_bcm870x(np); if (err) return err; err = 
xcvr_diag_bcm870x(np); if (err) return err; return 0; } static int xcvr_init_10g(struct niu *np) { int phy_id, err; u64 val; val = nr64_mac(XMAC_CONFIG); val &= ~XMAC_CONFIG_LED_POLARITY; val |= XMAC_CONFIG_FORCE_LED_ON; nw64_mac(XMAC_CONFIG, val); /* XXX shared resource, lock parent XXX */ val = nr64(MIF_CONFIG); val |= MIF_CONFIG_INDIRECT_MODE; nw64(MIF_CONFIG, val); phy_id = phy_decode(np->parent->port_phy, np->port); phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; /* handle different phy types */ switch (phy_id & NIU_PHY_ID_MASK) { case NIU_PHY_ID_MRVL88X2011: err = xcvr_init_10g_mrvl88x2011(np); break; default: /* bcom 8704 */ err = xcvr_init_10g_bcm8704(np); break; } return err; } static int mii_reset(struct niu *np) { int limit, err; err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET); if (err) return err; limit = 1000; while (--limit >= 0) { udelay(500); err = mii_read(np, np->phy_addr, MII_BMCR); if (err < 0) return err; if (!(err & BMCR_RESET)) break; } if (limit < 0) { netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n", np->port, err); return -ENODEV; } return 0; } static int xcvr_init_1g_rgmii(struct niu *np) { int err; u64 val; u16 bmcr, bmsr, estat; val = nr64(MIF_CONFIG); val &= ~MIF_CONFIG_INDIRECT_MODE; nw64(MIF_CONFIG, val); err = mii_reset(np); if (err) return err; err = mii_read(np, np->phy_addr, MII_BMSR); if (err < 0) return err; bmsr = err; estat = 0; if (bmsr & BMSR_ESTATEN) { err = mii_read(np, np->phy_addr, MII_ESTATUS); if (err < 0) return err; estat = err; } bmcr = 0; err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); if (err) return err; if (bmsr & BMSR_ESTATEN) { u16 ctrl1000 = 0; if (estat & ESTATUS_1000_TFULL) ctrl1000 |= ADVERTISE_1000FULL; err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000); if (err) return err; } bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX); err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); if (err) return err; err = mii_read(np, np->phy_addr, MII_BMCR); if (err < 0) return err; bmcr = mii_read(np, np->phy_addr, MII_BMCR); err = mii_read(np, np->phy_addr, MII_BMSR); if (err < 0) return err; return 0; } static int mii_init_common(struct niu *np) { struct niu_link_config *lp = &np->link_config; u16 bmcr, bmsr, adv, estat; int err; err = mii_reset(np); if (err) return err; err = mii_read(np, np->phy_addr, MII_BMSR); if (err < 0) return err; bmsr = err; estat = 0; if (bmsr & BMSR_ESTATEN) { err = mii_read(np, np->phy_addr, MII_ESTATUS); if (err < 0) return err; estat = err; } bmcr = 0; err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); if (err) return err; if (lp->loopback_mode == LOOPBACK_MAC) { bmcr |= BMCR_LOOPBACK; if (lp->active_speed == SPEED_1000) bmcr |= BMCR_SPEED1000; if (lp->active_duplex == DUPLEX_FULL) bmcr |= BMCR_FULLDPLX; } if (lp->loopback_mode == LOOPBACK_PHY) { u16 aux; aux = (BCM5464R_AUX_CTL_EXT_LB | BCM5464R_AUX_CTL_WRITE_1); err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux); if (err) return err; } if (lp->autoneg) { u16 ctrl1000; adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; if ((bmsr & BMSR_10HALF) && (lp->advertising & ADVERTISED_10baseT_Half)) adv |= ADVERTISE_10HALF; if ((bmsr & BMSR_10FULL) && (lp->advertising & ADVERTISED_10baseT_Full)) adv |= ADVERTISE_10FULL; if ((bmsr & BMSR_100HALF) && (lp->advertising & ADVERTISED_100baseT_Half)) adv |= ADVERTISE_100HALF; if ((bmsr & BMSR_100FULL) && (lp->advertising & ADVERTISED_100baseT_Full)) adv |= ADVERTISE_100FULL; err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv); if (err) return err; if (likely(bmsr & BMSR_ESTATEN)) { ctrl1000 
= 0; if ((estat & ESTATUS_1000_THALF) && (lp->advertising & ADVERTISED_1000baseT_Half)) ctrl1000 |= ADVERTISE_1000HALF; if ((estat & ESTATUS_1000_TFULL) && (lp->advertising & ADVERTISED_1000baseT_Full)) ctrl1000 |= ADVERTISE_1000FULL; err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000); if (err) return err; } bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); } else { /* !lp->autoneg */ int fulldpx; if (lp->duplex == DUPLEX_FULL) { bmcr |= BMCR_FULLDPLX; fulldpx = 1; } else if (lp->duplex == DUPLEX_HALF) fulldpx = 0; else return -EINVAL; if (lp->speed == SPEED_1000) { /* if X-full requested while not supported, or X-half requested while not supported... */ if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) || (!fulldpx && !(estat & ESTATUS_1000_THALF))) return -EINVAL; bmcr |= BMCR_SPEED1000; } else if (lp->speed == SPEED_100) { if ((fulldpx && !(bmsr & BMSR_100FULL)) || (!fulldpx && !(bmsr & BMSR_100HALF))) return -EINVAL; bmcr |= BMCR_SPEED100; } else if (lp->speed == SPEED_10) { if ((fulldpx && !(bmsr & BMSR_10FULL)) || (!fulldpx && !(bmsr & BMSR_10HALF))) return -EINVAL; } else return -EINVAL; } err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); if (err) return err; #if 0 err = mii_read(np, np->phy_addr, MII_BMCR); if (err < 0) return err; bmcr = err; err = mii_read(np, np->phy_addr, MII_BMSR); if (err < 0) return err; bmsr = err; pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n", np->port, bmcr, bmsr); #endif return 0; } static int xcvr_init_1g(struct niu *np) { u64 val; /* XXX shared resource, lock parent XXX */ val = nr64(MIF_CONFIG); val &= ~MIF_CONFIG_INDIRECT_MODE; nw64(MIF_CONFIG, val); return mii_init_common(np); } static int niu_xcvr_init(struct niu *np) { const struct niu_phy_ops *ops = np->phy_ops; int err; err = 0; if (ops->xcvr_init) err = ops->xcvr_init(np); return err; } static int niu_serdes_init(struct niu *np) { const struct niu_phy_ops *ops = np->phy_ops; int err; err = 0; if (ops->serdes_init) err = ops->serdes_init(np); return err; } static void niu_init_xif(struct niu *); static void niu_handle_led(struct niu *, int status); static int niu_link_status_common(struct niu *np, int link_up) { struct niu_link_config *lp = &np->link_config; struct net_device *dev = np->dev; unsigned long flags; if (!netif_carrier_ok(dev) && link_up) { netif_info(np, link, dev, "Link is up at %s, %s duplex\n", lp->active_speed == SPEED_10000 ? "10Gb/sec" : lp->active_speed == SPEED_1000 ? "1Gb/sec" : lp->active_speed == SPEED_100 ? "100Mbit/sec" : "10Mbit/sec", lp->active_duplex == DUPLEX_FULL ? "full" : "half"); spin_lock_irqsave(&np->lock, flags); niu_init_xif(np); niu_handle_led(np, 1); spin_unlock_irqrestore(&np->lock, flags); netif_carrier_on(dev); } else if (netif_carrier_ok(dev) && !link_up) { netif_warn(np, link, dev, "Link is down\n"); spin_lock_irqsave(&np->lock, flags); niu_handle_led(np, 0); spin_unlock_irqrestore(&np->lock, flags); netif_carrier_off(dev); } return 0; } static int link_status_10g_mrvl(struct niu *np, int *link_up_p) { int err, link_up, pma_status, pcs_status; link_up = 0; err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, MRVL88X2011_10G_PMD_STATUS_2); if (err < 0) goto out; /* Check PMA/PMD Register: 1.0001.2 == 1 */ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, MRVL88X2011_PMA_PMD_STATUS_1); if (err < 0) goto out; pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 
1 : 0); /* Check PMC Register : 3.0001.2 == 1: read twice */ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, MRVL88X2011_PMA_PMD_STATUS_1); if (err < 0) goto out; err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, MRVL88X2011_PMA_PMD_STATUS_1); if (err < 0) goto out; pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0); /* Check XGXS Register : 4.0018.[0-3,12] */ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR, MRVL88X2011_10G_XGXS_LANE_STAT); if (err < 0) goto out; if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 | PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 | PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC | 0x800)) link_up = (pma_status && pcs_status) ? 1 : 0; np->link_config.active_speed = SPEED_10000; np->link_config.active_duplex = DUPLEX_FULL; err = 0; out: mrvl88x2011_act_led(np, (link_up ? MRVL88X2011_LED_CTL_PCS_ACT : MRVL88X2011_LED_CTL_OFF)); *link_up_p = link_up; return err; } static int link_status_10g_bcm8706(struct niu *np, int *link_up_p) { int err, link_up; link_up = 0; err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, BCM8704_PMD_RCV_SIGDET); if (err < 0 || err == 0xffff) goto out; if (!(err & PMD_RCV_SIGDET_GLOBAL)) { err = 0; goto out; } err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, BCM8704_PCS_10G_R_STATUS); if (err < 0) goto out; if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { err = 0; goto out; } err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, BCM8704_PHYXS_XGXS_LANE_STAT); if (err < 0) goto out; if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_MAGIC | PHYXS_XGXS_LANE_STAT_PATTEST | PHYXS_XGXS_LANE_STAT_LANE3 | PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 | PHYXS_XGXS_LANE_STAT_LANE0)) { err = 0; np->link_config.active_speed = SPEED_INVALID; np->link_config.active_duplex = DUPLEX_INVALID; goto out; } link_up = 1; np->link_config.active_speed = SPEED_10000; np->link_config.active_duplex = DUPLEX_FULL; err = 0; out: *link_up_p = link_up; return err; } static int link_status_10g_bcom(struct niu *np, int *link_up_p) { int err, link_up; link_up = 0; err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, BCM8704_PMD_RCV_SIGDET); if (err < 0) goto out; if (!(err & PMD_RCV_SIGDET_GLOBAL)) { err = 0; goto out; } err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, BCM8704_PCS_10G_R_STATUS); if (err < 0) goto out; if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { err = 0; goto out; } err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, BCM8704_PHYXS_XGXS_LANE_STAT); if (err < 0) goto out; if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_MAGIC | PHYXS_XGXS_LANE_STAT_LANE3 | PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 | PHYXS_XGXS_LANE_STAT_LANE0)) { err = 0; goto out; } link_up = 1; np->link_config.active_speed = SPEED_10000; np->link_config.active_duplex = DUPLEX_FULL; err = 0; out: *link_up_p = link_up; return err; } static int link_status_10g(struct niu *np, int *link_up_p) { unsigned long flags; int err = -EINVAL; spin_lock_irqsave(&np->lock, flags); if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { int phy_id; phy_id = phy_decode(np->parent->port_phy, np->port); phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; /* handle different phy types */ switch (phy_id & NIU_PHY_ID_MASK) { case NIU_PHY_ID_MRVL88X2011: err = link_status_10g_mrvl(np, link_up_p); break; default: /* bcom 8704 */ err = link_status_10g_bcom(np, link_up_p); break; } } spin_unlock_irqrestore(&np->lock, flags); return err; } 
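/* Hotplug support: decide whether a hot-pluggable 10G PHY (NEM) is
 * currently installed by sampling ESR_INT_SIGNALS and requiring that
 * every SERDES ready/detect bit for this port is asserted.
 */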
static int niu_10g_phy_present(struct niu *np)
{
	u64 sig, mask, val;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return 0;
	}

	if ((sig & mask) != val)
		return 0;
	return 1;
}

static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = 0;
	int phy_present;
	int phy_present_prev;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
			1 : 0;
		phy_present = niu_10g_phy_present(np);
		if (phy_present != phy_present_prev) {
			/* state change */
			if (phy_present) {
				/* A NEM was just plugged in */
				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				if (np->phy_ops->xcvr_init)
					err = np->phy_ops->xcvr_init(np);
				if (err) {
					err = mdio_read(np, np->phy_addr,
						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
					if (err == 0xffff) {
						/* No mdio, back-to-back XAUI */
						goto out;
					}
					/* debounce */
					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				}
			} else {
				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				*link_up_p = 0;
				netif_warn(np, link, np->dev,
					   "Hotplug PHY Removed\n");
			}
		}
out:
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
			err = link_status_10g_bcm8706(np, link_up_p);
			if (err == 0xffff) {
				/* No mdio, back-to-back XAUI: it is C10NEM */
				*link_up_p = 1;
				np->link_config.active_speed = SPEED_10000;
				np->link_config.active_duplex = DUPLEX_FULL;
			}
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}

static int niu_link_status(struct niu *np, int *link_up_p)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->link_status)
		err = ops->link_status(np, link_up_p);

	return err;
}

static void niu_timer(struct timer_list *t)
{
	struct niu *np = from_timer(np, t, timer);
	unsigned long off;
	int err, link_up;

	err = niu_link_status(np, &link_up);
	if (!err)
		niu_link_status_common(np, link_up);

	if (netif_carrier_ok(np->dev))
		off = 5 * HZ;
	else
		off = 1 * HZ;
	np->timer.expires = jiffies + off;

	add_timer(&np->timer);
}

static const struct niu_phy_ops phy_ops_10g_serdes = {
	.serdes_init		= serdes_init_10g_serdes,
	.link_status		= link_status_10g_serdes,
};

static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
	.serdes_init		= serdes_init_niu_10g_serdes,
	.link_status		= link_status_10g_serdes,
};

static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
	.serdes_init		= serdes_init_niu_1g_serdes,
	.link_status		= link_status_1g_serdes,
};

static const struct niu_phy_ops phy_ops_1g_rgmii = {
	.xcvr_init		= xcvr_init_1g_rgmii,
	.link_status		= link_status_1g_rgmii,
};

static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
	.serdes_init		= serdes_init_niu_10g_fiber,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,
};

static const struct niu_phy_ops phy_ops_10g_fiber = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,
};

static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g_bcm8706,
	.link_status		= link_status_10g_hotplug,
};

static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
	.serdes_init		= serdes_init_niu_10g_fiber,
	.xcvr_init		= xcvr_init_10g_bcm8706,
	.link_status		= link_status_10g_hotplug,
};
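/* The niu_phy_ops tables above and below bundle the serdes/xcvr
 * bring-up and link-status hooks for one board/PHY combination;
 * niu_determine_phy_disposition() further down picks one of them
 * (through a niu_phy_template, which also supplies the MDIO address
 * base) according to the port's 10G/FIBER/XCVR_SERDES flags.
 */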
static const struct niu_phy_ops phy_ops_10g_copper = { .serdes_init = serdes_init_10g, .link_status = link_status_10g, /* XXX */ }; static const struct niu_phy_ops phy_ops_1g_fiber = { .serdes_init = serdes_init_1g, .xcvr_init = xcvr_init_1g, .link_status = link_status_1g, }; static const struct niu_phy_ops phy_ops_1g_copper = { .xcvr_init = xcvr_init_1g, .link_status = link_status_1g, }; struct niu_phy_template { const struct niu_phy_ops *ops; u32 phy_addr_base; }; static const struct niu_phy_template phy_template_niu_10g_fiber = { .ops = &phy_ops_10g_fiber_niu, .phy_addr_base = 16, }; static const struct niu_phy_template phy_template_niu_10g_serdes = { .ops = &phy_ops_10g_serdes_niu, .phy_addr_base = 0, }; static const struct niu_phy_template phy_template_niu_1g_serdes = { .ops = &phy_ops_1g_serdes_niu, .phy_addr_base = 0, }; static const struct niu_phy_template phy_template_10g_fiber = { .ops = &phy_ops_10g_fiber, .phy_addr_base = 8, }; static const struct niu_phy_template phy_template_10g_fiber_hotplug = { .ops = &phy_ops_10g_fiber_hotplug, .phy_addr_base = 8, }; static const struct niu_phy_template phy_template_niu_10g_hotplug = { .ops = &phy_ops_niu_10g_hotplug, .phy_addr_base = 8, }; static const struct niu_phy_template phy_template_10g_copper = { .ops = &phy_ops_10g_copper, .phy_addr_base = 10, }; static const struct niu_phy_template phy_template_1g_fiber = { .ops = &phy_ops_1g_fiber, .phy_addr_base = 0, }; static const struct niu_phy_template phy_template_1g_copper = { .ops = &phy_ops_1g_copper, .phy_addr_base = 0, }; static const struct niu_phy_template phy_template_1g_rgmii = { .ops = &phy_ops_1g_rgmii, .phy_addr_base = 0, }; static const struct niu_phy_template phy_template_10g_serdes = { .ops = &phy_ops_10g_serdes, .phy_addr_base = 0, }; static int niu_atca_port_num[4] = { 0, 0, 11, 10 }; static int serdes_init_10g_serdes(struct niu *np) { struct niu_link_config *lp = &np->link_config; unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; u64 ctrl_val, test_cfg_val, sig, mask, val; switch (np->port) { case 0: ctrl_reg = ENET_SERDES_0_CTRL_CFG; test_cfg_reg = ENET_SERDES_0_TEST_CFG; pll_cfg = ENET_SERDES_0_PLL_CFG; break; case 1: ctrl_reg = ENET_SERDES_1_CTRL_CFG; test_cfg_reg = ENET_SERDES_1_TEST_CFG; pll_cfg = ENET_SERDES_1_PLL_CFG; break; default: return -EINVAL; } ctrl_val = (ENET_SERDES_CTRL_SDET_0 | ENET_SERDES_CTRL_SDET_1 | ENET_SERDES_CTRL_SDET_2 | ENET_SERDES_CTRL_SDET_3 | (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); test_cfg_val = 0; if (lp->loopback_mode == LOOPBACK_PHY) { test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_0_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_1_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_2_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_3_SHIFT)); } esr_reset(np); nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2); nw64(ctrl_reg, ctrl_val); nw64(test_cfg_reg, test_cfg_val); /* Initialize all 4 lanes of the SERDES. 
*/ for (i = 0; i < 4; i++) { u32 rxtx_ctrl, glue0; int err; err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); if (err) return err; err = esr_read_glue0(np, i, &glue0); if (err) return err; rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); glue0 &= ~(ESR_GLUE_CTRL0_SRATE | ESR_GLUE_CTRL0_THCNT | ESR_GLUE_CTRL0_BLTIME); glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | (BLTIME_300_CYCLES << ESR_GLUE_CTRL0_BLTIME_SHIFT)); err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); if (err) return err; err = esr_write_glue0(np, i, glue0); if (err) return err; } sig = nr64(ESR_INT_SIGNALS); switch (np->port) { case 0: mask = ESR_INT_SIGNALS_P0_BITS; val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0 | ESR_INT_XSRDY_P0 | ESR_INT_XDP_P0_CH3 | ESR_INT_XDP_P0_CH2 | ESR_INT_XDP_P0_CH1 | ESR_INT_XDP_P0_CH0); break; case 1: mask = ESR_INT_SIGNALS_P1_BITS; val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1 | ESR_INT_XSRDY_P1 | ESR_INT_XDP_P1_CH3 | ESR_INT_XDP_P1_CH2 | ESR_INT_XDP_P1_CH1 | ESR_INT_XDP_P1_CH0); break; default: return -EINVAL; } if ((sig & mask) != val) { int err; err = serdes_init_1g_serdes(np); if (!err) { np->flags &= ~NIU_FLAGS_10G; np->mac_xcvr = MAC_XCVR_PCS; } else { netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", np->port); return -ENODEV; } } return 0; } static int niu_determine_phy_disposition(struct niu *np) { struct niu_parent *parent = np->parent; u8 plat_type = parent->plat_type; const struct niu_phy_template *tp; u32 phy_addr_off = 0; if (plat_type == PLAT_TYPE_NIU) { switch (np->flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_XCVR_SERDES)) { case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: /* 10G Serdes */ tp = &phy_template_niu_10g_serdes; break; case NIU_FLAGS_XCVR_SERDES: /* 1G Serdes */ tp = &phy_template_niu_1g_serdes; break; case NIU_FLAGS_10G | NIU_FLAGS_FIBER: /* 10G Fiber */ default: if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { tp = &phy_template_niu_10g_hotplug; if (np->port == 0) phy_addr_off = 8; if (np->port == 1) phy_addr_off = 12; } else { tp = &phy_template_niu_10g_fiber; phy_addr_off += np->port; } break; } } else { switch (np->flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_XCVR_SERDES)) { case 0: /* 1G copper */ tp = &phy_template_1g_copper; if (plat_type == PLAT_TYPE_VF_P0) phy_addr_off = 10; else if (plat_type == PLAT_TYPE_VF_P1) phy_addr_off = 26; phy_addr_off += (np->port ^ 0x3); break; case NIU_FLAGS_10G: /* 10G copper */ tp = &phy_template_10g_copper; break; case NIU_FLAGS_FIBER: /* 1G fiber */ tp = &phy_template_1g_fiber; break; case NIU_FLAGS_10G | NIU_FLAGS_FIBER: /* 10G fiber */ tp = &phy_template_10g_fiber; if (plat_type == PLAT_TYPE_VF_P0 || plat_type == PLAT_TYPE_VF_P1) phy_addr_off = 8; phy_addr_off += np->port; if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { tp = &phy_template_10g_fiber_hotplug; if (np->port == 0) phy_addr_off = 8; if (np->port == 1) phy_addr_off = 12; } break; case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: case NIU_FLAGS_XCVR_SERDES: switch(np->port) { case 0: case 1: tp = &phy_template_10g_serdes; break; case 2: case 3: tp = &phy_template_1g_rgmii; break; default: return -EINVAL; } phy_addr_off = niu_atca_port_num[np->port]; break; default: return -EINVAL; } } np->phy_ops = tp->ops; np->phy_addr = tp->phy_addr_base + phy_addr_off; return 0; } static int niu_init_link(struct niu *np) { struct niu_parent *parent = np->parent; int err, ignore; if (parent->plat_type == PLAT_TYPE_NIU) { err 
= niu_xcvr_init(np); if (err) return err; msleep(200); } err = niu_serdes_init(np); if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY)) return err; msleep(200); err = niu_xcvr_init(np); if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY)) niu_link_status(np, &ignore); return 0; } static void niu_set_primary_mac(struct niu *np, const unsigned char *addr) { u16 reg0 = addr[4] << 8 | addr[5]; u16 reg1 = addr[2] << 8 | addr[3]; u16 reg2 = addr[0] << 8 | addr[1]; if (np->flags & NIU_FLAGS_XMAC) { nw64_mac(XMAC_ADDR0, reg0); nw64_mac(XMAC_ADDR1, reg1); nw64_mac(XMAC_ADDR2, reg2); } else { nw64_mac(BMAC_ADDR0, reg0); nw64_mac(BMAC_ADDR1, reg1); nw64_mac(BMAC_ADDR2, reg2); } } static int niu_num_alt_addr(struct niu *np) { if (np->flags & NIU_FLAGS_XMAC) return XMAC_NUM_ALT_ADDR; else return BMAC_NUM_ALT_ADDR; } static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr) { u16 reg0 = addr[4] << 8 | addr[5]; u16 reg1 = addr[2] << 8 | addr[3]; u16 reg2 = addr[0] << 8 | addr[1]; if (index >= niu_num_alt_addr(np)) return -EINVAL; if (np->flags & NIU_FLAGS_XMAC) { nw64_mac(XMAC_ALT_ADDR0(index), reg0); nw64_mac(XMAC_ALT_ADDR1(index), reg1); nw64_mac(XMAC_ALT_ADDR2(index), reg2); } else { nw64_mac(BMAC_ALT_ADDR0(index), reg0); nw64_mac(BMAC_ALT_ADDR1(index), reg1); nw64_mac(BMAC_ALT_ADDR2(index), reg2); } return 0; } static int niu_enable_alt_mac(struct niu *np, int index, int on) { unsigned long reg; u64 val, mask; if (index >= niu_num_alt_addr(np)) return -EINVAL; if (np->flags & NIU_FLAGS_XMAC) { reg = XMAC_ADDR_CMPEN; mask = 1 << index; } else { reg = BMAC_ADDR_CMPEN; mask = 1 << (index + 1); } val = nr64_mac(reg); if (on) val |= mask; else val &= ~mask; nw64_mac(reg, val); return 0; } static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg, int num, int mac_pref) { u64 val = nr64_mac(reg); val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR); val |= num; if (mac_pref) val |= HOST_INFO_MPR; nw64_mac(reg, val); } static int __set_rdc_table_num(struct niu *np, int xmac_index, int bmac_index, int rdc_table_num, int mac_pref) { unsigned long reg; if (rdc_table_num & ~HOST_INFO_MACRDCTBLN) return -EINVAL; if (np->flags & NIU_FLAGS_XMAC) reg = XMAC_HOST_INFO(xmac_index); else reg = BMAC_HOST_INFO(bmac_index); __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref); return 0; } static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num, int mac_pref) { return __set_rdc_table_num(np, 17, 0, table_num, mac_pref); } static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num, int mac_pref) { return __set_rdc_table_num(np, 16, 8, table_num, mac_pref); } static int niu_set_alt_mac_rdc_table(struct niu *np, int idx, int table_num, int mac_pref) { if (idx >= niu_num_alt_addr(np)) return -EINVAL; return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref); } static u64 vlan_entry_set_parity(u64 reg_val) { u64 port01_mask; u64 port23_mask; port01_mask = 0x00ff; port23_mask = 0xff00; if (hweight64(reg_val & port01_mask) & 1) reg_val |= ENET_VLAN_TBL_PARITY0; else reg_val &= ~ENET_VLAN_TBL_PARITY0; if (hweight64(reg_val & port23_mask) & 1) reg_val |= ENET_VLAN_TBL_PARITY1; else reg_val &= ~ENET_VLAN_TBL_PARITY1; return reg_val; } static void vlan_tbl_write(struct niu *np, unsigned long index, int port, int vpr, int rdc_table) { u64 reg_val = nr64(ENET_VLAN_TBL(index)); reg_val &= ~((ENET_VLAN_TBL_VPR | ENET_VLAN_TBL_VLANRDCTBLN) << ENET_VLAN_TBL_SHIFT(port)); if (vpr) reg_val |= (ENET_VLAN_TBL_VPR << ENET_VLAN_TBL_SHIFT(port)); reg_val |= (rdc_table << 
ENET_VLAN_TBL_SHIFT(port)); reg_val = vlan_entry_set_parity(reg_val); nw64(ENET_VLAN_TBL(index), reg_val); } static void vlan_tbl_clear(struct niu *np) { int i; for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) nw64(ENET_VLAN_TBL(i), 0); } static int tcam_wait_bit(struct niu *np, u64 bit) { int limit = 1000; while (--limit > 0) { if (nr64(TCAM_CTL) & bit) break; udelay(1); } if (limit <= 0) return -ENODEV; return 0; } static int tcam_flush(struct niu *np, int index) { nw64(TCAM_KEY_0, 0x00); nw64(TCAM_KEY_MASK_0, 0xff); nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); return tcam_wait_bit(np, TCAM_CTL_STAT); } #if 0 static int tcam_read(struct niu *np, int index, u64 *key, u64 *mask) { int err; nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index)); err = tcam_wait_bit(np, TCAM_CTL_STAT); if (!err) { key[0] = nr64(TCAM_KEY_0); key[1] = nr64(TCAM_KEY_1); key[2] = nr64(TCAM_KEY_2); key[3] = nr64(TCAM_KEY_3); mask[0] = nr64(TCAM_KEY_MASK_0); mask[1] = nr64(TCAM_KEY_MASK_1); mask[2] = nr64(TCAM_KEY_MASK_2); mask[3] = nr64(TCAM_KEY_MASK_3); } return err; } #endif static int tcam_write(struct niu *np, int index, u64 *key, u64 *mask) { nw64(TCAM_KEY_0, key[0]); nw64(TCAM_KEY_1, key[1]); nw64(TCAM_KEY_2, key[2]); nw64(TCAM_KEY_3, key[3]); nw64(TCAM_KEY_MASK_0, mask[0]); nw64(TCAM_KEY_MASK_1, mask[1]); nw64(TCAM_KEY_MASK_2, mask[2]); nw64(TCAM_KEY_MASK_3, mask[3]); nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); return tcam_wait_bit(np, TCAM_CTL_STAT); } #if 0 static int tcam_assoc_read(struct niu *np, int index, u64 *data) { int err; nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index)); err = tcam_wait_bit(np, TCAM_CTL_STAT); if (!err) *data = nr64(TCAM_KEY_1); return err; } #endif static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data) { nw64(TCAM_KEY_1, assoc_data); nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index)); return tcam_wait_bit(np, TCAM_CTL_STAT); } static void tcam_enable(struct niu *np, int on) { u64 val = nr64(FFLP_CFG_1); if (on) val &= ~FFLP_CFG_1_TCAM_DIS; else val |= FFLP_CFG_1_TCAM_DIS; nw64(FFLP_CFG_1, val); } static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio) { u64 val = nr64(FFLP_CFG_1); val &= ~(FFLP_CFG_1_FFLPINITDONE | FFLP_CFG_1_CAMLAT | FFLP_CFG_1_CAMRATIO); val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT); val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT); nw64(FFLP_CFG_1, val); val = nr64(FFLP_CFG_1); val |= FFLP_CFG_1_FFLPINITDONE; nw64(FFLP_CFG_1, val); } static int tcam_user_eth_class_enable(struct niu *np, unsigned long class, int on) { unsigned long reg; u64 val; if (class < CLASS_CODE_ETHERTYPE1 || class > CLASS_CODE_ETHERTYPE2) return -EINVAL; reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); val = nr64(reg); if (on) val |= L2_CLS_VLD; else val &= ~L2_CLS_VLD; nw64(reg, val); return 0; } #if 0 static int tcam_user_eth_class_set(struct niu *np, unsigned long class, u64 ether_type) { unsigned long reg; u64 val; if (class < CLASS_CODE_ETHERTYPE1 || class > CLASS_CODE_ETHERTYPE2 || (ether_type & ~(u64)0xffff) != 0) return -EINVAL; reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); val = nr64(reg); val &= ~L2_CLS_ETYPE; val |= (ether_type << L2_CLS_ETYPE_SHIFT); nw64(reg, val); return 0; } #endif static int tcam_user_ip_class_enable(struct niu *np, unsigned long class, int on) { unsigned long reg; u64 val; if (class < CLASS_CODE_USER_PROG1 || class > CLASS_CODE_USER_PROG4) return -EINVAL; reg = L3_CLS(class - CLASS_CODE_USER_PROG1); val = nr64(reg); if (on) val |= L3_CLS_VALID; else val &= ~L3_CLS_VALID; nw64(reg, val); return 0; } static int 
tcam_user_ip_class_set(struct niu *np, unsigned long class, int ipv6, u64 protocol_id, u64 tos_mask, u64 tos_val) { unsigned long reg; u64 val; if (class < CLASS_CODE_USER_PROG1 || class > CLASS_CODE_USER_PROG4 || (protocol_id & ~(u64)0xff) != 0 || (tos_mask & ~(u64)0xff) != 0 || (tos_val & ~(u64)0xff) != 0) return -EINVAL; reg = L3_CLS(class - CLASS_CODE_USER_PROG1); val = nr64(reg); val &= ~(L3_CLS_IPVER | L3_CLS_PID | L3_CLS_TOSMASK | L3_CLS_TOS); if (ipv6) val |= L3_CLS_IPVER; val |= (protocol_id << L3_CLS_PID_SHIFT); val |= (tos_mask << L3_CLS_TOSMASK_SHIFT); val |= (tos_val << L3_CLS_TOS_SHIFT); nw64(reg, val); return 0; } static int tcam_early_init(struct niu *np) { unsigned long i; int err; tcam_enable(np, 0); tcam_set_lat_and_ratio(np, DEFAULT_TCAM_LATENCY, DEFAULT_TCAM_ACCESS_RATIO); for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) { err = tcam_user_eth_class_enable(np, i, 0); if (err) return err; } for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) { err = tcam_user_ip_class_enable(np, i, 0); if (err) return err; } return 0; } static int tcam_flush_all(struct niu *np) { unsigned long i; for (i = 0; i < np->parent->tcam_num_entries; i++) { int err = tcam_flush(np, i); if (err) return err; } return 0; } static u64 hash_addr_regval(unsigned long index, unsigned long num_entries) { return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0); } #if 0 static int hash_read(struct niu *np, unsigned long partition, unsigned long index, unsigned long num_entries, u64 *data) { u64 val = hash_addr_regval(index, num_entries); unsigned long i; if (partition >= FCRAM_NUM_PARTITIONS || index + num_entries > FCRAM_SIZE) return -EINVAL; nw64(HASH_TBL_ADDR(partition), val); for (i = 0; i < num_entries; i++) data[i] = nr64(HASH_TBL_DATA(partition)); return 0; } #endif static int hash_write(struct niu *np, unsigned long partition, unsigned long index, unsigned long num_entries, u64 *data) { u64 val = hash_addr_regval(index, num_entries); unsigned long i; if (partition >= FCRAM_NUM_PARTITIONS || index + (num_entries * 8) > FCRAM_SIZE) return -EINVAL; nw64(HASH_TBL_ADDR(partition), val); for (i = 0; i < num_entries; i++) nw64(HASH_TBL_DATA(partition), data[i]); return 0; } static void fflp_reset(struct niu *np) { u64 val; nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST); udelay(10); nw64(FFLP_CFG_1, 0); val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE; nw64(FFLP_CFG_1, val); } static void fflp_set_timings(struct niu *np) { u64 val = nr64(FFLP_CFG_1); val &= ~FFLP_CFG_1_FFLPINITDONE; val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT); nw64(FFLP_CFG_1, val); val = nr64(FFLP_CFG_1); val |= FFLP_CFG_1_FFLPINITDONE; nw64(FFLP_CFG_1, val); val = nr64(FCRAM_REF_TMR); val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN); val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT); val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT); nw64(FCRAM_REF_TMR, val); } static int fflp_set_partition(struct niu *np, u64 partition, u64 mask, u64 base, int enable) { unsigned long reg; u64 val; if (partition >= FCRAM_NUM_PARTITIONS || (mask & ~(u64)0x1f) != 0 || (base & ~(u64)0x1f) != 0) return -EINVAL; reg = FLW_PRT_SEL(partition); val = nr64(reg); val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE); val |= (mask << FLW_PRT_SEL_MASK_SHIFT); val |= (base << FLW_PRT_SEL_BASE_SHIFT); if (enable) val |= FLW_PRT_SEL_EXT; nw64(reg, val); return 0; } static int fflp_disable_all_partitions(struct niu *np) { unsigned long i; for (i = 0; i < FCRAM_NUM_PARTITIONS; 
i++) { int err = fflp_set_partition(np, 0, 0, 0, 0); if (err) return err; } return 0; } static void fflp_llcsnap_enable(struct niu *np, int on) { u64 val = nr64(FFLP_CFG_1); if (on) val |= FFLP_CFG_1_LLCSNAP; else val &= ~FFLP_CFG_1_LLCSNAP; nw64(FFLP_CFG_1, val); } static void fflp_errors_enable(struct niu *np, int on) { u64 val = nr64(FFLP_CFG_1); if (on) val &= ~FFLP_CFG_1_ERRORDIS; else val |= FFLP_CFG_1_ERRORDIS; nw64(FFLP_CFG_1, val); } static int fflp_hash_clear(struct niu *np) { struct fcram_hash_ipv4 ent; unsigned long i; /* IPV4 hash entry with valid bit clear, rest is don't care. */ memset(&ent, 0, sizeof(ent)); ent.header = HASH_HEADER_EXT; for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) { int err = hash_write(np, 0, i, 1, (u64 *) &ent); if (err) return err; } return 0; } static int fflp_early_init(struct niu *np) { struct niu_parent *parent; unsigned long flags; int err; niu_lock_parent(np, flags); parent = np->parent; err = 0; if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { if (np->parent->plat_type != PLAT_TYPE_NIU) { fflp_reset(np); fflp_set_timings(np); err = fflp_disable_all_partitions(np); if (err) { netif_printk(np, probe, KERN_DEBUG, np->dev, "fflp_disable_all_partitions failed, err=%d\n", err); goto out; } } err = tcam_early_init(np); if (err) { netif_printk(np, probe, KERN_DEBUG, np->dev, "tcam_early_init failed, err=%d\n", err); goto out; } fflp_llcsnap_enable(np, 1); fflp_errors_enable(np, 0); nw64(H1POLY, 0); nw64(H2POLY, 0); err = tcam_flush_all(np); if (err) { netif_printk(np, probe, KERN_DEBUG, np->dev, "tcam_flush_all failed, err=%d\n", err); goto out; } if (np->parent->plat_type != PLAT_TYPE_NIU) { err = fflp_hash_clear(np); if (err) { netif_printk(np, probe, KERN_DEBUG, np->dev, "fflp_hash_clear failed, err=%d\n", err); goto out; } } vlan_tbl_clear(np); parent->flags |= PARENT_FLGS_CLS_HWINIT; } out: niu_unlock_parent(np, flags); return err; } static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key) { if (class_code < CLASS_CODE_USER_PROG1 || class_code > CLASS_CODE_SCTP_IPV6) return -EINVAL; nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key); return 0; } static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key) { if (class_code < CLASS_CODE_USER_PROG1 || class_code > CLASS_CODE_SCTP_IPV6) return -EINVAL; nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key); return 0; } /* Entries for the ports are interleaved in the TCAM */ static u16 tcam_get_index(struct niu *np, u16 idx) { /* One entry reserved for IP fragment rule */ if (idx >= (np->clas.tcam_sz - 1)) idx = 0; return np->clas.tcam_top + ((idx+1) * np->parent->num_ports); } static u16 tcam_get_size(struct niu *np) { /* One entry reserved for IP fragment rule */ return np->clas.tcam_sz - 1; } static u16 tcam_get_valid_entry_cnt(struct niu *np) { /* One entry reserved for IP fragment rule */ return np->clas.tcam_valid_entries - 1; } static void niu_rx_skb_append(struct sk_buff *skb, struct page *page, u32 offset, u32 size, u32 truesize) { skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size); skb->len += size; skb->data_len += size; skb->truesize += truesize; } static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a) { a >>= PAGE_SHIFT; a ^= (a >> ilog2(MAX_RBR_RING_SIZE)); return a & (MAX_RBR_RING_SIZE - 1); } static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, struct page ***link) { unsigned int h = niu_hash_rxaddr(rp, addr); struct page *p, **pp; addr &= PAGE_MASK; pp = &rp->rxhash[h]; for (; (p = *pp) != 
NULL; pp = &niu_next_page(p)) { if (p->index == addr) { *link = pp; goto found; } } BUG(); found: return p; } static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base) { unsigned int h = niu_hash_rxaddr(rp, base); page->index = base; niu_next_page(page) = rp->rxhash[h]; rp->rxhash[h] = page; } static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, gfp_t mask, int start_index) { struct page *page; u64 addr; int i; page = alloc_page(mask); if (!page) return -ENOMEM; addr = np->ops->map_page(np->device, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (!addr) { __free_page(page); return -ENOMEM; } niu_hash_page(rp, page, addr); if (rp->rbr_blocks_per_page > 1) page_ref_add(page, rp->rbr_blocks_per_page - 1); for (i = 0; i < rp->rbr_blocks_per_page; i++) { __le32 *rbr = &rp->rbr[start_index + i]; *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT); addr += rp->rbr_block_size; } return 0; } static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) { int index = rp->rbr_index; rp->rbr_pending++; if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { int err = niu_rbr_add_page(np, rp, mask, index); if (unlikely(err)) { rp->rbr_pending--; return; } rp->rbr_index += rp->rbr_blocks_per_page; BUG_ON(rp->rbr_index > rp->rbr_table_size); if (rp->rbr_index == rp->rbr_table_size) rp->rbr_index = 0; if (rp->rbr_pending >= rp->rbr_kick_thresh) { nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); rp->rbr_pending = 0; } } } static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) { unsigned int index = rp->rcr_index; int num_rcr = 0; rp->rx_dropped++; while (1) { struct page *page, **link; u64 addr, val; u32 rcr_size; num_rcr++; val = le64_to_cpup(&rp->rcr[index]); addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << RCR_ENTRY_PKT_BUF_ADDR_SHIFT; page = niu_find_rxpage(rp, addr, &link); rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> RCR_ENTRY_PKTBUFSZ_SHIFT]; if ((page->index + PAGE_SIZE) - rcr_size == addr) { *link = niu_next_page(page); np->ops->unmap_page(np->device, page->index, PAGE_SIZE, DMA_FROM_DEVICE); page->index = 0; niu_next_page(page) = NULL; __free_page(page); rp->rbr_refill_pending++; } index = NEXT_RCR(rp, index); if (!(val & RCR_ENTRY_MULTI)) break; } rp->rcr_index = index; return num_rcr; } static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, struct rx_ring_info *rp) { unsigned int index = rp->rcr_index; struct rx_pkt_hdr1 *rh; struct sk_buff *skb; int len, num_rcr; skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE); if (unlikely(!skb)) return niu_rx_pkt_ignore(np, rp); num_rcr = 0; while (1) { struct page *page, **link; u32 rcr_size, append_size; u64 addr, val, off; num_rcr++; val = le64_to_cpup(&rp->rcr[index]); len = (val & RCR_ENTRY_L2_LEN) >> RCR_ENTRY_L2_LEN_SHIFT; append_size = len + ETH_HLEN + ETH_FCS_LEN; addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << RCR_ENTRY_PKT_BUF_ADDR_SHIFT; page = niu_find_rxpage(rp, addr, &link); rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> RCR_ENTRY_PKTBUFSZ_SHIFT]; off = addr & ~PAGE_MASK; if (num_rcr == 1) { int ptype; ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT); if ((ptype == RCR_PKT_TYPE_TCP || ptype == RCR_PKT_TYPE_UDP) && !(val & (RCR_ENTRY_NOPORT | RCR_ENTRY_ERROR))) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb_checksum_none_assert(skb); } else if (!(val & RCR_ENTRY_MULTI)) append_size = append_size - skb->len; niu_rx_skb_append(skb, page, off, append_size, rcr_size); if ((page->index + rp->rbr_block_size) - rcr_size == addr) { *link = niu_next_page(page); 
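			/* Last RX buffer carved from this page: it was
			 * just unlinked from the rxhash chain, so unmap it
			 * and count it toward the pending RBR refill; the
			 * skb frag now owns the page reference.
			 */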
np->ops->unmap_page(np->device, page->index, PAGE_SIZE, DMA_FROM_DEVICE); page->index = 0; niu_next_page(page) = NULL; rp->rbr_refill_pending++; } else get_page(page); index = NEXT_RCR(rp, index); if (!(val & RCR_ENTRY_MULTI)) break; } rp->rcr_index = index; len += sizeof(*rh); len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN); __pskb_pull_tail(skb, len); rh = (struct rx_pkt_hdr1 *) skb->data; if (np->dev->features & NETIF_F_RXHASH) skb_set_hash(skb, ((u32)rh->hashval2_0 << 24 | (u32)rh->hashval2_1 << 16 | (u32)rh->hashval1_1 << 8 | (u32)rh->hashval1_2 << 0), PKT_HASH_TYPE_L3); skb_pull(skb, sizeof(*rh)); rp->rx_packets++; rp->rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, np->dev); skb_record_rx_queue(skb, rp->rx_channel); napi_gro_receive(napi, skb); return num_rcr; } static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) { int blocks_per_page = rp->rbr_blocks_per_page; int err, index = rp->rbr_index; err = 0; while (index < (rp->rbr_table_size - blocks_per_page)) { err = niu_rbr_add_page(np, rp, mask, index); if (unlikely(err)) break; index += blocks_per_page; } rp->rbr_index = index; return err; } static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) { int i; for (i = 0; i < MAX_RBR_RING_SIZE; i++) { struct page *page; page = rp->rxhash[i]; while (page) { struct page *next = niu_next_page(page); u64 base = page->index; np->ops->unmap_page(np->device, base, PAGE_SIZE, DMA_FROM_DEVICE); page->index = 0; niu_next_page(page) = NULL; __free_page(page); page = next; } } for (i = 0; i < rp->rbr_table_size; i++) rp->rbr[i] = cpu_to_le32(0); rp->rbr_index = 0; } static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) { struct tx_buff_info *tb = &rp->tx_buffs[idx]; struct sk_buff *skb = tb->skb; struct tx_pkt_hdr *tp; u64 tx_flags; int i, len; tp = (struct tx_pkt_hdr *) skb->data; tx_flags = le64_to_cpup(&tp->flags); rp->tx_packets++; rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) - ((tx_flags & TXHDR_PAD) / 2)); len = skb_headlen(skb); np->ops->unmap_single(np->device, tb->mapping, len, DMA_TO_DEVICE); if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK) rp->mark_pending--; tb->skb = NULL; do { idx = NEXT_TX(rp, idx); len -= MAX_TX_DESC_LEN; } while (len > 0); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { tb = &rp->tx_buffs[idx]; BUG_ON(tb->skb != NULL); np->ops->unmap_page(np->device, tb->mapping, skb_frag_size(&skb_shinfo(skb)->frags[i]), DMA_TO_DEVICE); idx = NEXT_TX(rp, idx); } dev_kfree_skb(skb); return idx; } #define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4) static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) { struct netdev_queue *txq; u16 pkt_cnt, tmp; int cons, index; u64 cs; index = (rp - np->tx_rings); txq = netdev_get_tx_queue(np->dev, index); cs = rp->tx_cs; if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK)))) goto out; tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT; pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) & (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT); rp->last_pkt_cnt = tmp; cons = rp->cons; netif_printk(np, tx_done, KERN_DEBUG, np->dev, "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); while (pkt_cnt--) cons = release_tx_packet(np, rp, cons); rp->cons = cons; smp_mb(); out: if (unlikely(netif_tx_queue_stopped(txq) && (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { __netif_tx_lock(txq, smp_processor_id()); if (netif_tx_queue_stopped(txq) && (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) netif_tx_wake_queue(txq); __netif_tx_unlock(txq); } } static inline void 
niu_sync_rx_discard_stats(struct niu *np, struct rx_ring_info *rp, const int limit) { /* This elaborate scheme is needed for reading the RX discard * counters, as they are only 16-bit and can overflow quickly, * and because the overflow indication bit is not usable as * the counter value does not wrap, but remains at max value * 0xFFFF. * * In theory and in practice counters can be lost in between * reading nr64() and clearing the counter nw64(). For this * reason, the number of counter clearings nw64() is * limited/reduced though the limit parameter. */ int rx_channel = rp->rx_channel; u32 misc, wred; /* RXMISC (Receive Miscellaneous Discard Count), covers the * following discard events: IPP (Input Port Process), * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive * Block Ring) prefetch buffer is empty. */ misc = nr64(RXMISC(rx_channel)); if (unlikely((misc & RXMISC_COUNT) > limit)) { nw64(RXMISC(rx_channel), 0); rp->rx_errors += misc & RXMISC_COUNT; if (unlikely(misc & RXMISC_OFLOW)) dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n", rx_channel); netif_printk(np, rx_err, KERN_DEBUG, np->dev, "rx-%d: MISC drop=%u over=%u\n", rx_channel, misc, misc-limit); } /* WRED (Weighted Random Early Discard) by hardware */ wred = nr64(RED_DIS_CNT(rx_channel)); if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) { nw64(RED_DIS_CNT(rx_channel), 0); rp->rx_dropped += wred & RED_DIS_CNT_COUNT; if (unlikely(wred & RED_DIS_CNT_OFLOW)) dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel); netif_printk(np, rx_err, KERN_DEBUG, np->dev, "rx-%d: WRED drop=%u over=%u\n", rx_channel, wred, wred-limit); } } static int niu_rx_work(struct napi_struct *napi, struct niu *np, struct rx_ring_info *rp, int budget) { int qlen, rcr_done = 0, work_done = 0; struct rxdma_mailbox *mbox = rp->mbox; u64 stat; #if 1 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN; #else stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN); #endif mbox->rx_dma_ctl_stat = 0; mbox->rcrstat_a = 0; netif_printk(np, rx_status, KERN_DEBUG, np->dev, "%s(chan[%d]), stat[%llx] qlen=%d\n", __func__, rp->rx_channel, (unsigned long long)stat, qlen); rcr_done = work_done = 0; qlen = min(qlen, budget); while (work_done < qlen) { rcr_done += niu_process_rx_pkt(napi, np, rp); work_done++; } if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) { unsigned int i; for (i = 0; i < rp->rbr_refill_pending; i++) niu_rbr_refill(np, rp, GFP_ATOMIC); rp->rbr_refill_pending = 0; } stat = (RX_DMA_CTL_STAT_MEX | ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) | ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT)); nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); /* Only sync discards stats when qlen indicate potential for drops */ if (qlen > 10) niu_sync_rx_discard_stats(np, rp, 0x7FFF); return work_done; } static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget) { u64 v0 = lp->v0; u32 tx_vec = (v0 >> 32); u32 rx_vec = (v0 & 0xffffffff); int i, work_done = 0; netif_printk(np, intr, KERN_DEBUG, np->dev, "%s() v0[%016llx]\n", __func__, (unsigned long long)v0); for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; if (tx_vec & (1 << rp->tx_channel)) niu_tx_work(np, rp); nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0); } for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; if (rx_vec & (1 << rp->rx_channel)) { int this_work_done; this_work_done = niu_rx_work(&lp->napi, np, 
rp, budget); budget -= this_work_done; work_done += this_work_done; } nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0); } return work_done; } static int niu_poll(struct napi_struct *napi, int budget) { struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi); struct niu *np = lp->np; int work_done; work_done = niu_poll_core(np, lp, budget); if (work_done < budget) { napi_complete_done(napi, work_done); niu_ldg_rearm(np, lp, 1); } return work_done; } static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, u64 stat) { netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel); if (stat & RX_DMA_CTL_STAT_RBR_TMOUT) pr_cont("RBR_TMOUT "); if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR) pr_cont("RSP_CNT "); if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS) pr_cont("BYTE_EN_BUS "); if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR) pr_cont("RSP_DAT "); if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR) pr_cont("RCR_ACK "); if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR) pr_cont("RCR_SHA_PAR "); if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR) pr_cont("RBR_PRE_PAR "); if (stat & RX_DMA_CTL_STAT_CONFIG_ERR) pr_cont("CONFIG "); if (stat & RX_DMA_CTL_STAT_RCRINCON) pr_cont("RCRINCON "); if (stat & RX_DMA_CTL_STAT_RCRFULL) pr_cont("RCRFULL "); if (stat & RX_DMA_CTL_STAT_RBRFULL) pr_cont("RBRFULL "); if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE) pr_cont("RBRLOGPAGE "); if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE) pr_cont("CFIGLOGPAGE "); if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR) pr_cont("DC_FIDO "); pr_cont(")\n"); } static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) { u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); int err = 0; if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL | RX_DMA_CTL_STAT_PORT_FATAL)) err = -EINVAL; if (err) { netdev_err(np->dev, "RX channel %u error, stat[%llx]\n", rp->rx_channel, (unsigned long long) stat); niu_log_rxchan_errors(np, rp, stat); } nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat & RX_DMA_CTL_WRITE_CLEAR_ERRS); return err; } static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, u64 cs) { netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel); if (cs & TX_CS_MBOX_ERR) pr_cont("MBOX "); if (cs & TX_CS_PKT_SIZE_ERR) pr_cont("PKT_SIZE "); if (cs & TX_CS_TX_RING_OFLOW) pr_cont("TX_RING_OFLOW "); if (cs & TX_CS_PREF_BUF_PAR_ERR) pr_cont("PREF_BUF_PAR "); if (cs & TX_CS_NACK_PREF) pr_cont("NACK_PREF "); if (cs & TX_CS_NACK_PKT_RD) pr_cont("NACK_PKT_RD "); if (cs & TX_CS_CONF_PART_ERR) pr_cont("CONF_PART "); if (cs & TX_CS_PKT_PRT_ERR) pr_cont("PKT_PTR "); pr_cont(")\n"); } static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) { u64 cs, logh, logl; cs = nr64(TX_CS(rp->tx_channel)); logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n", rp->tx_channel, (unsigned long long)cs, (unsigned long long)logh, (unsigned long long)logl); niu_log_txchan_errors(np, rp, cs); return -ENODEV; } static int niu_mif_interrupt(struct niu *np) { u64 mif_status = nr64(MIF_STATUS); int phy_mdint = 0; if (np->flags & NIU_FLAGS_XMAC) { u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS); if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT) phy_mdint = 1; } netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n", (unsigned long long)mif_status, phy_mdint); return -ENODEV; } static void niu_xmac_interrupt(struct niu *np) { struct niu_xmac_stats *mp = &np->mac_stats.xmac; u64 val; val = nr64_mac(XTXMAC_STATUS); if (val & XTXMAC_STATUS_FRAME_CNT_EXP) mp->tx_frames += TXMAC_FRM_CNT_COUNT; if (val & 
XTXMAC_STATUS_BYTE_CNT_EXP) mp->tx_bytes += TXMAC_BYTE_CNT_COUNT; if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR) mp->tx_fifo_errors++; if (val & XTXMAC_STATUS_TXMAC_OFLOW) mp->tx_overflow_errors++; if (val & XTXMAC_STATUS_MAX_PSIZE_ERR) mp->tx_max_pkt_size_errors++; if (val & XTXMAC_STATUS_TXMAC_UFLOW) mp->tx_underflow_errors++; val = nr64_mac(XRXMAC_STATUS); if (val & XRXMAC_STATUS_LCL_FLT_STATUS) mp->rx_local_faults++; if (val & XRXMAC_STATUS_RFLT_DET) mp->rx_remote_faults++; if (val & XRXMAC_STATUS_LFLT_CNT_EXP) mp->rx_link_faults += LINK_FAULT_CNT_COUNT; if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP) mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT; if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP) mp->rx_frags += RXMAC_FRAG_CNT_COUNT; if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP) mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT; if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP) mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT; if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP) mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT; if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP) mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT; if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP) mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP) mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP) mp->rx_octets += RXMAC_BT_CNT_COUNT; if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP) mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT; if (val & XRXMAC_STATUS_LENERR_CNT_EXP) mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT; if (val & XRXMAC_STATUS_CRCERR_CNT_EXP) mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT; if (val & XRXMAC_STATUS_RXUFLOW) mp->rx_underflows++; if (val & XRXMAC_STATUS_RXOFLOW) mp->rx_overflows++; val = nr64_mac(XMAC_FC_STAT); if (val & XMAC_FC_STAT_TX_MAC_NPAUSE) mp->pause_off_state++; if (val & XMAC_FC_STAT_TX_MAC_PAUSE) mp->pause_on_state++; if (val & XMAC_FC_STAT_RX_MAC_RPAUSE) mp->pause_received++; } static void niu_bmac_interrupt(struct niu *np) { struct niu_bmac_stats *mp = &np->mac_stats.bmac; u64 val; val = nr64_mac(BTXMAC_STATUS); if (val & BTXMAC_STATUS_UNDERRUN) mp->tx_underflow_errors++; if (val & BTXMAC_STATUS_MAX_PKT_ERR) mp->tx_max_pkt_size_errors++; if (val & BTXMAC_STATUS_BYTE_CNT_EXP) mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT; if (val & BTXMAC_STATUS_FRAME_CNT_EXP) mp->tx_frames += BTXMAC_FRM_CNT_COUNT; val = nr64_mac(BRXMAC_STATUS); if (val & BRXMAC_STATUS_OVERFLOW) mp->rx_overflows++; if (val & BRXMAC_STATUS_FRAME_CNT_EXP) mp->rx_frames += BRXMAC_FRAME_CNT_COUNT; if (val & BRXMAC_STATUS_ALIGN_ERR_EXP) mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; if (val & BRXMAC_STATUS_CRC_ERR_EXP) mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; if (val & BRXMAC_STATUS_LEN_ERR_EXP) mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT; val = nr64_mac(BMAC_CTRL_STATUS); if (val & BMAC_CTRL_STATUS_NOPAUSE) mp->pause_off_state++; if (val & BMAC_CTRL_STATUS_PAUSE) mp->pause_on_state++; if (val & BMAC_CTRL_STATUS_PAUSE_RECV) mp->pause_received++; } static int niu_mac_interrupt(struct niu *np) { if (np->flags & NIU_FLAGS_XMAC) niu_xmac_interrupt(np); else niu_bmac_interrupt(np); return 0; } static void niu_log_device_error(struct niu *np, u64 stat) { netdev_err(np->dev, "Core device errors ( "); if (stat & SYS_ERR_MASK_META2) pr_cont("META2 "); if (stat & SYS_ERR_MASK_META1) pr_cont("META1 "); 
if (stat & SYS_ERR_MASK_PEU) pr_cont("PEU "); if (stat & SYS_ERR_MASK_TXC) pr_cont("TXC "); if (stat & SYS_ERR_MASK_RDMC) pr_cont("RDMC "); if (stat & SYS_ERR_MASK_TDMC) pr_cont("TDMC "); if (stat & SYS_ERR_MASK_ZCP) pr_cont("ZCP "); if (stat & SYS_ERR_MASK_FFLP) pr_cont("FFLP "); if (stat & SYS_ERR_MASK_IPP) pr_cont("IPP "); if (stat & SYS_ERR_MASK_MAC) pr_cont("MAC "); if (stat & SYS_ERR_MASK_SMX) pr_cont("SMX "); pr_cont(")\n"); } static int niu_device_error(struct niu *np) { u64 stat = nr64(SYS_ERR_STAT); netdev_err(np->dev, "Core device error, stat[%llx]\n", (unsigned long long)stat); niu_log_device_error(np, stat); return -ENODEV; } static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp, u64 v0, u64 v1, u64 v2) { int i, err = 0; lp->v0 = v0; lp->v1 = v1; lp->v2 = v2; if (v1 & 0x00000000ffffffffULL) { u32 rx_vec = (v1 & 0xffffffff); for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; if (rx_vec & (1 << rp->rx_channel)) { int r = niu_rx_error(np, rp); if (r) { err = r; } else { if (!v0) nw64(RX_DMA_CTL_STAT(rp->rx_channel), RX_DMA_CTL_STAT_MEX); } } } } if (v1 & 0x7fffffff00000000ULL) { u32 tx_vec = (v1 >> 32) & 0x7fffffff; for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; if (tx_vec & (1 << rp->tx_channel)) { int r = niu_tx_error(np, rp); if (r) err = r; } } } if ((v0 | v1) & 0x8000000000000000ULL) { int r = niu_mif_interrupt(np); if (r) err = r; } if (v2) { if (v2 & 0x01ef) { int r = niu_mac_interrupt(np); if (r) err = r; } if (v2 & 0x0210) { int r = niu_device_error(np); if (r) err = r; } } if (err) niu_enable_interrupts(np, 0); return err; } static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, int ldn) { struct rxdma_mailbox *mbox = rp->mbox; u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); stat_write = (RX_DMA_CTL_STAT_RCRTHRES | RX_DMA_CTL_STAT_RCRTO); nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write); netif_printk(np, intr, KERN_DEBUG, np->dev, "%s() stat[%llx]\n", __func__, (unsigned long long)stat); } static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, int ldn) { rp->tx_cs = nr64(TX_CS(rp->tx_channel)); netif_printk(np, intr, KERN_DEBUG, np->dev, "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs); } static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) { struct niu_parent *parent = np->parent; u32 rx_vec, tx_vec; int i; tx_vec = (v0 >> 32); rx_vec = (v0 & 0xffffffff); for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; int ldn = LDN_RXDMA(rp->rx_channel); if (parent->ldg_map[ldn] != ldg) continue; nw64(LD_IM0(ldn), LD_IM0_MASK); if (rx_vec & (1 << rp->rx_channel)) niu_rxchan_intr(np, rp, ldn); } for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; int ldn = LDN_TXDMA(rp->tx_channel); if (parent->ldg_map[ldn] != ldg) continue; nw64(LD_IM0(ldn), LD_IM0_MASK); if (tx_vec & (1 << rp->tx_channel)) niu_txchan_intr(np, rp, ldn); } } static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, u64 v0, u64 v1, u64 v2) { if (likely(napi_schedule_prep(&lp->napi))) { lp->v0 = v0; lp->v1 = v1; lp->v2 = v2; __niu_fastpath_interrupt(np, lp->ldg_num, v0); __napi_schedule(&lp->napi); } } static irqreturn_t niu_interrupt(int irq, void *dev_id) { struct niu_ldg *lp = dev_id; struct niu *np = lp->np; int ldg = lp->ldg_num; unsigned long flags; u64 v0, v1, v2; if (netif_msg_intr(np)) printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)", __func__, lp, ldg); 
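	/* Snapshot the three logical-device state vectors (LDSV0/1/2) under
	 * np->lock, then route error/MIF conditions to the slow path and
	 * normal RX/TX work to NAPI via niu_schedule_napi().
	 */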
spin_lock_irqsave(&np->lock, flags); v0 = nr64(LDSV0(ldg)); v1 = nr64(LDSV1(ldg)); v2 = nr64(LDSV2(ldg)); if (netif_msg_intr(np)) pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n", (unsigned long long) v0, (unsigned long long) v1, (unsigned long long) v2); if (unlikely(!v0 && !v1 && !v2)) { spin_unlock_irqrestore(&np->lock, flags); return IRQ_NONE; } if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) { int err = niu_slowpath_interrupt(np, lp, v0, v1, v2); if (err) goto out; } if (likely(v0 & ~((u64)1 << LDN_MIF))) niu_schedule_napi(np, lp, v0, v1, v2); else niu_ldg_rearm(np, lp, 1); out: spin_unlock_irqrestore(&np->lock, flags); return IRQ_HANDLED; } static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp) { if (rp->mbox) { np->ops->free_coherent(np->device, sizeof(struct rxdma_mailbox), rp->mbox, rp->mbox_dma); rp->mbox = NULL; } if (rp->rcr) { np->ops->free_coherent(np->device, MAX_RCR_RING_SIZE * sizeof(__le64), rp->rcr, rp->rcr_dma); rp->rcr = NULL; rp->rcr_table_size = 0; rp->rcr_index = 0; } if (rp->rbr) { niu_rbr_free(np, rp); np->ops->free_coherent(np->device, MAX_RBR_RING_SIZE * sizeof(__le32), rp->rbr, rp->rbr_dma); rp->rbr = NULL; rp->rbr_table_size = 0; rp->rbr_index = 0; } kfree(rp->rxhash); rp->rxhash = NULL; } static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp) { if (rp->mbox) { np->ops->free_coherent(np->device, sizeof(struct txdma_mailbox), rp->mbox, rp->mbox_dma); rp->mbox = NULL; } if (rp->descr) { int i; for (i = 0; i < MAX_TX_RING_SIZE; i++) { if (rp->tx_buffs[i].skb) (void) release_tx_packet(np, rp, i); } np->ops->free_coherent(np->device, MAX_TX_RING_SIZE * sizeof(__le64), rp->descr, rp->descr_dma); rp->descr = NULL; rp->pending = 0; rp->prod = 0; rp->cons = 0; rp->wrap_bit = 0; } } static void niu_free_channels(struct niu *np) { int i; if (np->rx_rings) { for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; niu_free_rx_ring_info(np, rp); } kfree(np->rx_rings); np->rx_rings = NULL; np->num_rx_rings = 0; } if (np->tx_rings) { for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; niu_free_tx_ring_info(np, rp); } kfree(np->tx_rings); np->tx_rings = NULL; np->num_tx_rings = 0; } } static int niu_alloc_rx_ring_info(struct niu *np, struct rx_ring_info *rp) { BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64); rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *), GFP_KERNEL); if (!rp->rxhash) return -ENOMEM; rp->mbox = np->ops->alloc_coherent(np->device, sizeof(struct rxdma_mailbox), &rp->mbox_dma, GFP_KERNEL); if (!rp->mbox) return -ENOMEM; if ((unsigned long)rp->mbox & (64UL - 1)) { netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n", rp->mbox); return -EINVAL; } rp->rcr = np->ops->alloc_coherent(np->device, MAX_RCR_RING_SIZE * sizeof(__le64), &rp->rcr_dma, GFP_KERNEL); if (!rp->rcr) return -ENOMEM; if ((unsigned long)rp->rcr & (64UL - 1)) { netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n", rp->rcr); return -EINVAL; } rp->rcr_table_size = MAX_RCR_RING_SIZE; rp->rcr_index = 0; rp->rbr = np->ops->alloc_coherent(np->device, MAX_RBR_RING_SIZE * sizeof(__le32), &rp->rbr_dma, GFP_KERNEL); if (!rp->rbr) return -ENOMEM; if ((unsigned long)rp->rbr & (64UL - 1)) { netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n", rp->rbr); return -EINVAL; } rp->rbr_table_size = MAX_RBR_RING_SIZE; rp->rbr_index = 0; rp->rbr_pending = 0; return 0; } static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp) { int 
mtu = np->dev->mtu; /* These values are recommended by the HW designers for fair * utilization of DRR amongst the rings. */ rp->max_burst = mtu + 32; if (rp->max_burst > 4096) rp->max_burst = 4096; } static int niu_alloc_tx_ring_info(struct niu *np, struct tx_ring_info *rp) { BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64); rp->mbox = np->ops->alloc_coherent(np->device, sizeof(struct txdma_mailbox), &rp->mbox_dma, GFP_KERNEL); if (!rp->mbox) return -ENOMEM; if ((unsigned long)rp->mbox & (64UL - 1)) { netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n", rp->mbox); return -EINVAL; } rp->descr = np->ops->alloc_coherent(np->device, MAX_TX_RING_SIZE * sizeof(__le64), &rp->descr_dma, GFP_KERNEL); if (!rp->descr) return -ENOMEM; if ((unsigned long)rp->descr & (64UL - 1)) { netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n", rp->descr); return -EINVAL; } rp->pending = MAX_TX_RING_SIZE; rp->prod = 0; rp->cons = 0; rp->wrap_bit = 0; /* XXX make these configurable... XXX */ rp->mark_freq = rp->pending / 4; niu_set_max_burst(np, rp); return 0; } static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) { u16 bss; bss = min(PAGE_SHIFT, 15); rp->rbr_block_size = 1 << bss; rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss); rp->rbr_sizes[0] = 256; rp->rbr_sizes[1] = 1024; if (np->dev->mtu > ETH_DATA_LEN) { switch (PAGE_SIZE) { case 4 * 1024: rp->rbr_sizes[2] = 4096; break; default: rp->rbr_sizes[2] = 8192; break; } } else { rp->rbr_sizes[2] = 2048; } rp->rbr_sizes[3] = rp->rbr_block_size; } static int niu_alloc_channels(struct niu *np) { struct niu_parent *parent = np->parent; int first_rx_channel, first_tx_channel; int num_rx_rings, num_tx_rings; struct rx_ring_info *rx_rings; struct tx_ring_info *tx_rings; int i, port, err; port = np->port; first_rx_channel = first_tx_channel = 0; for (i = 0; i < port; i++) { first_rx_channel += parent->rxchan_per_port[i]; first_tx_channel += parent->txchan_per_port[i]; } num_rx_rings = parent->rxchan_per_port[port]; num_tx_rings = parent->txchan_per_port[port]; rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info), GFP_KERNEL); err = -ENOMEM; if (!rx_rings) goto out_err; np->num_rx_rings = num_rx_rings; smp_wmb(); np->rx_rings = rx_rings; netif_set_real_num_rx_queues(np->dev, num_rx_rings); for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; rp->np = np; rp->rx_channel = first_rx_channel + i; err = niu_alloc_rx_ring_info(np, rp); if (err) goto out_err; niu_size_rbr(np, rp); /* XXX better defaults, configurable, etc... 
XXX */ rp->nonsyn_window = 64; rp->nonsyn_threshold = rp->rcr_table_size - 64; rp->syn_window = 64; rp->syn_threshold = rp->rcr_table_size - 64; rp->rcr_pkt_threshold = 16; rp->rcr_timeout = 8; rp->rbr_kick_thresh = RBR_REFILL_MIN; if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page) rp->rbr_kick_thresh = rp->rbr_blocks_per_page; err = niu_rbr_fill(np, rp, GFP_KERNEL); if (err) goto out_err; } tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info), GFP_KERNEL); err = -ENOMEM; if (!tx_rings) goto out_err; np->num_tx_rings = num_tx_rings; smp_wmb(); np->tx_rings = tx_rings; netif_set_real_num_tx_queues(np->dev, num_tx_rings); for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; rp->np = np; rp->tx_channel = first_tx_channel + i; err = niu_alloc_tx_ring_info(np, rp); if (err) goto out_err; } return 0; out_err: niu_free_channels(np); return err; } static int niu_tx_cs_sng_poll(struct niu *np, int channel) { int limit = 1000; while (--limit > 0) { u64 val = nr64(TX_CS(channel)); if (val & TX_CS_SNG_STATE) return 0; } return -ENODEV; } static int niu_tx_channel_stop(struct niu *np, int channel) { u64 val = nr64(TX_CS(channel)); val |= TX_CS_STOP_N_GO; nw64(TX_CS(channel), val); return niu_tx_cs_sng_poll(np, channel); } static int niu_tx_cs_reset_poll(struct niu *np, int channel) { int limit = 1000; while (--limit > 0) { u64 val = nr64(TX_CS(channel)); if (!(val & TX_CS_RST)) return 0; } return -ENODEV; } static int niu_tx_channel_reset(struct niu *np, int channel) { u64 val = nr64(TX_CS(channel)); int err; val |= TX_CS_RST; nw64(TX_CS(channel), val); err = niu_tx_cs_reset_poll(np, channel); if (!err) nw64(TX_RING_KICK(channel), 0); return err; } static int niu_tx_channel_lpage_init(struct niu *np, int channel) { u64 val; nw64(TX_LOG_MASK1(channel), 0); nw64(TX_LOG_VAL1(channel), 0); nw64(TX_LOG_MASK2(channel), 0); nw64(TX_LOG_VAL2(channel), 0); nw64(TX_LOG_PAGE_RELO1(channel), 0); nw64(TX_LOG_PAGE_RELO2(channel), 0); nw64(TX_LOG_PAGE_HDL(channel), 0); val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT; val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1); nw64(TX_LOG_PAGE_VLD(channel), val); /* XXX TXDMA 32bit mode? 
XXX */ return 0; } static void niu_txc_enable_port(struct niu *np, int on) { unsigned long flags; u64 val, mask; niu_lock_parent(np, flags); val = nr64(TXC_CONTROL); mask = (u64)1 << np->port; if (on) { val |= TXC_CONTROL_ENABLE | mask; } else { val &= ~mask; if ((val & ~TXC_CONTROL_ENABLE) == 0) val &= ~TXC_CONTROL_ENABLE; } nw64(TXC_CONTROL, val); niu_unlock_parent(np, flags); } static void niu_txc_set_imask(struct niu *np, u64 imask) { unsigned long flags; u64 val; niu_lock_parent(np, flags); val = nr64(TXC_INT_MASK); val &= ~TXC_INT_MASK_VAL(np->port); val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port)); niu_unlock_parent(np, flags); } static void niu_txc_port_dma_enable(struct niu *np, int on) { u64 val = 0; if (on) { int i; for (i = 0; i < np->num_tx_rings; i++) val |= (1 << np->tx_rings[i].tx_channel); } nw64(TXC_PORT_DMA(np->port), val); } static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) { int err, channel = rp->tx_channel; u64 val, ring_len; err = niu_tx_channel_stop(np, channel); if (err) return err; err = niu_tx_channel_reset(np, channel); if (err) return err; err = niu_tx_channel_lpage_init(np, channel); if (err) return err; nw64(TXC_DMA_MAX(channel), rp->max_burst); nw64(TX_ENT_MSK(channel), 0); if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | TX_RNG_CFIG_STADDR)) { netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n", channel, (unsigned long long)rp->descr_dma); return -EINVAL; } /* The length field in TX_RNG_CFIG is measured in 64-byte * blocks. rp->pending is the number of TX descriptors in * our ring, 8 bytes each, thus we divide by 8 bytes more * to get the proper value the chip wants. */ ring_len = (rp->pending / 8); val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) | rp->descr_dma); nw64(TX_RNG_CFIG(channel), val); if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n", channel, (unsigned long long)rp->mbox_dma); return -EINVAL; } nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR); nw64(TX_CS(channel), 0); rp->last_pkt_cnt = 0; return 0; } static void niu_init_rdc_groups(struct niu *np) { struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port]; int i, first_table_num = tp->first_table_num; for (i = 0; i < tp->num_tables; i++) { struct rdc_table *tbl = &tp->tables[i]; int this_table = first_table_num + i; int slot; for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) nw64(RDC_TBL(this_table, slot), tbl->rxdma_channel[slot]); } nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]); } static void niu_init_drr_weight(struct niu *np) { int type = phy_decode(np->parent->port_phy, np->port); u64 val; switch (type) { case PORT_TYPE_10G: val = PT_DRR_WEIGHT_DEFAULT_10G; break; case PORT_TYPE_1G: default: val = PT_DRR_WEIGHT_DEFAULT_1G; break; } nw64(PT_DRR_WT(np->port), val); } static int niu_init_hostinfo(struct niu *np) { struct niu_parent *parent = np->parent; struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; int i, err, num_alt = niu_num_alt_addr(np); int first_rdc_table = tp->first_table_num; err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); if (err) return err; err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); if (err) return err; for (i = 0; i < num_alt; i++) { err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1); if (err) return err; } return 0; } static int niu_rx_channel_reset(struct niu *np, int channel) { 
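	/* Assert RXDMA_CFIG1_RST and poll until the hardware clears it. */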
return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel), RXDMA_CFIG1_RST, 1000, 10, "RXDMA_CFIG1"); } static int niu_rx_channel_lpage_init(struct niu *np, int channel) { u64 val; nw64(RX_LOG_MASK1(channel), 0); nw64(RX_LOG_VAL1(channel), 0); nw64(RX_LOG_MASK2(channel), 0); nw64(RX_LOG_VAL2(channel), 0); nw64(RX_LOG_PAGE_RELO1(channel), 0); nw64(RX_LOG_PAGE_RELO2(channel), 0); nw64(RX_LOG_PAGE_HDL(channel), 0); val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT; val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1); nw64(RX_LOG_PAGE_VLD(channel), val); return 0; } static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp) { u64 val; val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) | ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) | ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) | ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT)); nw64(RDC_RED_PARA(rp->rx_channel), val); } static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) { u64 val = 0; *ret = 0; switch (rp->rbr_block_size) { case 4 * 1024: val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); break; case 8 * 1024: val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT); break; case 16 * 1024: val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT); break; case 32 * 1024: val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT); break; default: return -EINVAL; } val |= RBR_CFIG_B_VLD2; switch (rp->rbr_sizes[2]) { case 2 * 1024: val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT); break; case 4 * 1024: val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT); break; case 8 * 1024: val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT); break; case 16 * 1024: val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT); break; default: return -EINVAL; } val |= RBR_CFIG_B_VLD1; switch (rp->rbr_sizes[1]) { case 1 * 1024: val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT); break; case 2 * 1024: val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT); break; case 4 * 1024: val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT); break; case 8 * 1024: val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT); break; default: return -EINVAL; } val |= RBR_CFIG_B_VLD0; switch (rp->rbr_sizes[0]) { case 256: val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT); break; case 512: val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT); break; case 1 * 1024: val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT); break; case 2 * 1024: val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT); break; default: return -EINVAL; } *ret = val; return 0; } static int niu_enable_rx_channel(struct niu *np, int channel, int on) { u64 val = nr64(RXDMA_CFIG1(channel)); int limit; if (on) val |= RXDMA_CFIG1_EN; else val &= ~RXDMA_CFIG1_EN; nw64(RXDMA_CFIG1(channel), val); limit = 1000; while (--limit > 0) { if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST) break; udelay(10); } if (limit <= 0) return -ENODEV; return 0; } static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp) { int err, channel = rp->rx_channel; u64 val; err = niu_rx_channel_reset(np, channel); if (err) return err; err = niu_rx_channel_lpage_init(np, channel); if (err) return err; niu_rx_channel_wred_init(np, rp); nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY); nw64(RX_DMA_CTL_STAT(channel), (RX_DMA_CTL_STAT_MEX | RX_DMA_CTL_STAT_RCRTHRES | RX_DMA_CTL_STAT_RCRTO | RX_DMA_CTL_STAT_RBR_EMPTY)); nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); nw64(RXDMA_CFIG2(channel), ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) | RXDMA_CFIG2_FULL_HDR)); nw64(RBR_CFIG_A(channel), ((u64)rp->rbr_table_size << 
RBR_CFIG_A_LEN_SHIFT) | (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); err = niu_compute_rbr_cfig_b(rp, &val); if (err) return err; nw64(RBR_CFIG_B(channel), val); nw64(RCRCFIG_A(channel), ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) | (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR))); nw64(RCRCFIG_B(channel), ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) | RCRCFIG_B_ENTOUT | ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT)); err = niu_enable_rx_channel(np, channel, 1); if (err) return err; nw64(RBR_KICK(channel), rp->rbr_index); val = nr64(RX_DMA_CTL_STAT(channel)); val |= RX_DMA_CTL_STAT_RBR_EMPTY; nw64(RX_DMA_CTL_STAT(channel), val); return 0; } static int niu_init_rx_channels(struct niu *np) { unsigned long flags; u64 seed = jiffies_64; int err, i; niu_lock_parent(np, flags); nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider); nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL)); niu_unlock_parent(np, flags); /* XXX RXDMA 32bit mode? XXX */ niu_init_rdc_groups(np); niu_init_drr_weight(np); err = niu_init_hostinfo(np); if (err) return err; for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; err = niu_init_one_rx_channel(np, rp); if (err) return err; } return 0; } static int niu_set_ip_frag_rule(struct niu *np) { struct niu_parent *parent = np->parent; struct niu_classifier *cp = &np->clas; struct niu_tcam_entry *tp; int index, err; index = cp->tcam_top; tp = &parent->tcam[index]; /* Note that the noport bit is the same in both ipv4 and * ipv6 format TCAM entries. */ memset(tp, 0, sizeof(*tp)); tp->key[1] = TCAM_V4KEY1_NOPORT; tp->key_mask[1] = TCAM_V4KEY1_NOPORT; tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT)); err = tcam_write(np, index, tp->key, tp->key_mask); if (err) return err; err = tcam_assoc_write(np, index, tp->assoc_data); if (err) return err; tp->valid = 1; cp->tcam_valid_entries++; return 0; } static int niu_init_classifier_hw(struct niu *np) { struct niu_parent *parent = np->parent; struct niu_classifier *cp = &np->clas; int i, err; nw64(H1POLY, cp->h1_init); nw64(H2POLY, cp->h2_init); err = niu_init_hostinfo(np); if (err) return err; for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) { struct niu_vlan_rdc *vp = &cp->vlan_mappings[i]; vlan_tbl_write(np, i, np->port, vp->vlan_pref, vp->rdc_num); } for (i = 0; i < cp->num_alt_mac_mappings; i++) { struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i]; err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num, ap->rdc_num, ap->mac_pref); if (err) return err; } for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { int index = i - CLASS_CODE_USER_PROG1; err = niu_set_tcam_key(np, i, parent->tcam_key[index]); if (err) return err; err = niu_set_flow_key(np, i, parent->flow_key[index]); if (err) return err; } err = niu_set_ip_frag_rule(np); if (err) return err; tcam_enable(np, 1); return 0; } static int niu_zcp_write(struct niu *np, int index, u64 *data) { nw64(ZCP_RAM_DATA0, data[0]); nw64(ZCP_RAM_DATA1, data[1]); nw64(ZCP_RAM_DATA2, data[2]); nw64(ZCP_RAM_DATA3, data[3]); nw64(ZCP_RAM_DATA4, data[4]); nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL); nw64(ZCP_RAM_ACC, (ZCP_RAM_ACC_WRITE | (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 1000, 100); } static int niu_zcp_read(struct niu *np, int index, u64 *data) { int err; err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 1000, 100); if (err) { 
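		/* The ZCP RAM access port never went idle; report and bail out. */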
netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n", (unsigned long long)nr64(ZCP_RAM_ACC)); return err; } nw64(ZCP_RAM_ACC, (ZCP_RAM_ACC_READ | (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 1000, 100); if (err) { netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n", (unsigned long long)nr64(ZCP_RAM_ACC)); return err; } data[0] = nr64(ZCP_RAM_DATA0); data[1] = nr64(ZCP_RAM_DATA1); data[2] = nr64(ZCP_RAM_DATA2); data[3] = nr64(ZCP_RAM_DATA3); data[4] = nr64(ZCP_RAM_DATA4); return 0; } static void niu_zcp_cfifo_reset(struct niu *np) { u64 val = nr64(RESET_CFIFO); val |= RESET_CFIFO_RST(np->port); nw64(RESET_CFIFO, val); udelay(10); val &= ~RESET_CFIFO_RST(np->port); nw64(RESET_CFIFO, val); } static int niu_init_zcp(struct niu *np) { u64 data[5], rbuf[5]; int i, max, err; if (np->parent->plat_type != PLAT_TYPE_NIU) { if (np->port == 0 || np->port == 1) max = ATLAS_P0_P1_CFIFO_ENTRIES; else max = ATLAS_P2_P3_CFIFO_ENTRIES; } else max = NIU_CFIFO_ENTRIES; data[0] = 0; data[1] = 0; data[2] = 0; data[3] = 0; data[4] = 0; for (i = 0; i < max; i++) { err = niu_zcp_write(np, i, data); if (err) return err; err = niu_zcp_read(np, i, rbuf); if (err) return err; } niu_zcp_cfifo_reset(np); nw64(CFIFO_ECC(np->port), 0); nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL); (void) nr64(ZCP_INT_STAT); nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL); return 0; } static void niu_ipp_write(struct niu *np, int index, u64 *data) { u64 val = nr64_ipp(IPP_CFIG); nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W); nw64_ipp(IPP_DFIFO_WR_PTR, index); nw64_ipp(IPP_DFIFO_WR0, data[0]); nw64_ipp(IPP_DFIFO_WR1, data[1]); nw64_ipp(IPP_DFIFO_WR2, data[2]); nw64_ipp(IPP_DFIFO_WR3, data[3]); nw64_ipp(IPP_DFIFO_WR4, data[4]); nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W); } static void niu_ipp_read(struct niu *np, int index, u64 *data) { nw64_ipp(IPP_DFIFO_RD_PTR, index); data[0] = nr64_ipp(IPP_DFIFO_RD0); data[1] = nr64_ipp(IPP_DFIFO_RD1); data[2] = nr64_ipp(IPP_DFIFO_RD2); data[3] = nr64_ipp(IPP_DFIFO_RD3); data[4] = nr64_ipp(IPP_DFIFO_RD4); } static int niu_ipp_reset(struct niu *np) { return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST, 1000, 100, "IPP_CFIG"); } static int niu_init_ipp(struct niu *np) { u64 data[5], rbuf[5], val; int i, max, err; if (np->parent->plat_type != PLAT_TYPE_NIU) { if (np->port == 0 || np->port == 1) max = ATLAS_P0_P1_DFIFO_ENTRIES; else max = ATLAS_P2_P3_DFIFO_ENTRIES; } else max = NIU_DFIFO_ENTRIES; data[0] = 0; data[1] = 0; data[2] = 0; data[3] = 0; data[4] = 0; for (i = 0; i < max; i++) { niu_ipp_write(np, i, data); niu_ipp_read(np, i, rbuf); } (void) nr64_ipp(IPP_INT_STAT); (void) nr64_ipp(IPP_INT_STAT); err = niu_ipp_reset(np); if (err) return err; (void) nr64_ipp(IPP_PKT_DIS); (void) nr64_ipp(IPP_BAD_CS_CNT); (void) nr64_ipp(IPP_ECC); (void) nr64_ipp(IPP_INT_STAT); nw64_ipp(IPP_MSK, ~IPP_MSK_ALL); val = nr64_ipp(IPP_CFIG); val &= ~IPP_CFIG_IP_MAX_PKT; val |= (IPP_CFIG_IPP_ENABLE | IPP_CFIG_DFIFO_ECC_EN | IPP_CFIG_DROP_BAD_CRC | IPP_CFIG_CKSUM_EN | (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT)); nw64_ipp(IPP_CFIG, val); return 0; } static void niu_handle_led(struct niu *np, int status) { u64 val; val = nr64_mac(XMAC_CONFIG); if ((np->flags & NIU_FLAGS_10G) != 0 && (np->flags & NIU_FLAGS_FIBER) != 0) { if (status) { val |= XMAC_CONFIG_LED_POLARITY; val &= ~XMAC_CONFIG_FORCE_LED_ON; } else { val |= XMAC_CONFIG_FORCE_LED_ON; val &= ~XMAC_CONFIG_LED_POLARITY; } } 
nw64_mac(XMAC_CONFIG, val); } static void niu_init_xif_xmac(struct niu *np) { struct niu_link_config *lp = &np->link_config; u64 val; if (np->flags & NIU_FLAGS_XCVR_SERDES) { val = nr64(MIF_CONFIG); val |= MIF_CONFIG_ATCA_GE; nw64(MIF_CONFIG, val); } val = nr64_mac(XMAC_CONFIG); val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; val |= XMAC_CONFIG_TX_OUTPUT_EN; if (lp->loopback_mode == LOOPBACK_MAC) { val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; val |= XMAC_CONFIG_LOOPBACK; } else { val &= ~XMAC_CONFIG_LOOPBACK; } if (np->flags & NIU_FLAGS_10G) { val &= ~XMAC_CONFIG_LFS_DISABLE; } else { val |= XMAC_CONFIG_LFS_DISABLE; if (!(np->flags & NIU_FLAGS_FIBER) && !(np->flags & NIU_FLAGS_XCVR_SERDES)) val |= XMAC_CONFIG_1G_PCS_BYPASS; else val &= ~XMAC_CONFIG_1G_PCS_BYPASS; } val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; if (lp->active_speed == SPEED_100) val |= XMAC_CONFIG_SEL_CLK_25MHZ; else val &= ~XMAC_CONFIG_SEL_CLK_25MHZ; nw64_mac(XMAC_CONFIG, val); val = nr64_mac(XMAC_CONFIG); val &= ~XMAC_CONFIG_MODE_MASK; if (np->flags & NIU_FLAGS_10G) { val |= XMAC_CONFIG_MODE_XGMII; } else { if (lp->active_speed == SPEED_1000) val |= XMAC_CONFIG_MODE_GMII; else val |= XMAC_CONFIG_MODE_MII; } nw64_mac(XMAC_CONFIG, val); } static void niu_init_xif_bmac(struct niu *np) { struct niu_link_config *lp = &np->link_config; u64 val; val = BMAC_XIF_CONFIG_TX_OUTPUT_EN; if (lp->loopback_mode == LOOPBACK_MAC) val |= BMAC_XIF_CONFIG_MII_LOOPBACK; else val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK; if (lp->active_speed == SPEED_1000) val |= BMAC_XIF_CONFIG_GMII_MODE; else val &= ~BMAC_XIF_CONFIG_GMII_MODE; val &= ~(BMAC_XIF_CONFIG_LINK_LED | BMAC_XIF_CONFIG_LED_POLARITY); if (!(np->flags & NIU_FLAGS_10G) && !(np->flags & NIU_FLAGS_FIBER) && lp->active_speed == SPEED_100) val |= BMAC_XIF_CONFIG_25MHZ_CLOCK; else val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK; nw64_mac(BMAC_XIF_CONFIG, val); } static void niu_init_xif(struct niu *np) { if (np->flags & NIU_FLAGS_XMAC) niu_init_xif_xmac(np); else niu_init_xif_bmac(np); } static void niu_pcs_mii_reset(struct niu *np) { int limit = 1000; u64 val = nr64_pcs(PCS_MII_CTL); val |= PCS_MII_CTL_RST; nw64_pcs(PCS_MII_CTL, val); while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) { udelay(100); val = nr64_pcs(PCS_MII_CTL); } } static void niu_xpcs_reset(struct niu *np) { int limit = 1000; u64 val = nr64_xpcs(XPCS_CONTROL1); val |= XPCS_CONTROL1_RESET; nw64_xpcs(XPCS_CONTROL1, val); while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) { udelay(100); val = nr64_xpcs(XPCS_CONTROL1); } } static int niu_init_pcs(struct niu *np) { struct niu_link_config *lp = &np->link_config; u64 val; switch (np->flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_XCVR_SERDES)) { case NIU_FLAGS_FIBER: /* 1G fiber */ nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); nw64_pcs(PCS_DPATH_MODE, 0); niu_pcs_mii_reset(np); break; case NIU_FLAGS_10G: case NIU_FLAGS_10G | NIU_FLAGS_FIBER: case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: /* 10G SERDES */ if (!(np->flags & NIU_FLAGS_XMAC)) return -EINVAL; /* 10G copper or fiber */ val = nr64_mac(XMAC_CONFIG); val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; nw64_mac(XMAC_CONFIG, val); niu_xpcs_reset(np); val = nr64_xpcs(XPCS_CONTROL1); if (lp->loopback_mode == LOOPBACK_PHY) val |= XPCS_CONTROL1_LOOPBACK; else val &= ~XPCS_CONTROL1_LOOPBACK; nw64_xpcs(XPCS_CONTROL1, val); nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0); (void) nr64_xpcs(XPCS_SYMERR_CNT01); (void) nr64_xpcs(XPCS_SYMERR_CNT23); break; case NIU_FLAGS_XCVR_SERDES: /* 1G SERDES */ niu_pcs_mii_reset(np); nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); nw64_pcs(PCS_DPATH_MODE, 
0); break; case 0: /* 1G copper */ case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: /* 1G RGMII FIBER */ nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII); niu_pcs_mii_reset(np); break; default: return -EINVAL; } return 0; } static int niu_reset_tx_xmac(struct niu *np) { return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST, (XTXMAC_SW_RST_REG_RS | XTXMAC_SW_RST_SOFT_RST), 1000, 100, "XTXMAC_SW_RST"); } static int niu_reset_tx_bmac(struct niu *np) { int limit; nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET); limit = 1000; while (--limit >= 0) { if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET)) break; udelay(100); } if (limit < 0) { dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n", np->port, (unsigned long long) nr64_mac(BTXMAC_SW_RST)); return -ENODEV; } return 0; } static int niu_reset_tx_mac(struct niu *np) { if (np->flags & NIU_FLAGS_XMAC) return niu_reset_tx_xmac(np); else return niu_reset_tx_bmac(np); } static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max) { u64 val; val = nr64_mac(XMAC_MIN); val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE | XMAC_MIN_RX_MIN_PKT_SIZE); val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT); val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT); nw64_mac(XMAC_MIN, val); nw64_mac(XMAC_MAX, max); nw64_mac(XTXMAC_STAT_MSK, ~(u64)0); val = nr64_mac(XMAC_IPG); if (np->flags & NIU_FLAGS_10G) { val &= ~XMAC_IPG_IPG_XGMII; val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT); } else { val &= ~XMAC_IPG_IPG_MII_GMII; val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT); } nw64_mac(XMAC_IPG, val); val = nr64_mac(XMAC_CONFIG); val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC | XMAC_CONFIG_STRETCH_MODE | XMAC_CONFIG_VAR_MIN_IPG_EN | XMAC_CONFIG_TX_ENABLE); nw64_mac(XMAC_CONFIG, val); nw64_mac(TXMAC_FRM_CNT, 0); nw64_mac(TXMAC_BYTE_CNT, 0); } static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max) { u64 val; nw64_mac(BMAC_MIN_FRAME, min); nw64_mac(BMAC_MAX_FRAME, max); nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0); nw64_mac(BMAC_CTRL_TYPE, 0x8808); nw64_mac(BMAC_PREAMBLE_SIZE, 7); val = nr64_mac(BTXMAC_CONFIG); val &= ~(BTXMAC_CONFIG_FCS_DISABLE | BTXMAC_CONFIG_ENABLE); nw64_mac(BTXMAC_CONFIG, val); } static void niu_init_tx_mac(struct niu *np) { u64 min, max; min = 64; if (np->dev->mtu > ETH_DATA_LEN) max = 9216; else max = 1522; /* The XMAC_MIN register only accepts values for TX min which * have the low 3 bits cleared. 
*/ BUG_ON(min & 0x7); if (np->flags & NIU_FLAGS_XMAC) niu_init_tx_xmac(np, min, max); else niu_init_tx_bmac(np, min, max); } static int niu_reset_rx_xmac(struct niu *np) { int limit; nw64_mac(XRXMAC_SW_RST, XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST); limit = 1000; while (--limit >= 0) { if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST))) break; udelay(100); } if (limit < 0) { dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n", np->port, (unsigned long long) nr64_mac(XRXMAC_SW_RST)); return -ENODEV; } return 0; } static int niu_reset_rx_bmac(struct niu *np) { int limit; nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET); limit = 1000; while (--limit >= 0) { if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET)) break; udelay(100); } if (limit < 0) { dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n", np->port, (unsigned long long) nr64_mac(BRXMAC_SW_RST)); return -ENODEV; } return 0; } static int niu_reset_rx_mac(struct niu *np) { if (np->flags & NIU_FLAGS_XMAC) return niu_reset_rx_xmac(np); else return niu_reset_rx_bmac(np); } static void niu_init_rx_xmac(struct niu *np) { struct niu_parent *parent = np->parent; struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; int first_rdc_table = tp->first_table_num; unsigned long i; u64 val; nw64_mac(XMAC_ADD_FILT0, 0); nw64_mac(XMAC_ADD_FILT1, 0); nw64_mac(XMAC_ADD_FILT2, 0); nw64_mac(XMAC_ADD_FILT12_MASK, 0); nw64_mac(XMAC_ADD_FILT00_MASK, 0); for (i = 0; i < MAC_NUM_HASH; i++) nw64_mac(XMAC_HASH_TBL(i), 0); nw64_mac(XRXMAC_STAT_MSK, ~(u64)0); niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); val = nr64_mac(XMAC_CONFIG); val &= ~(XMAC_CONFIG_RX_MAC_ENABLE | XMAC_CONFIG_PROMISCUOUS | XMAC_CONFIG_PROMISC_GROUP | XMAC_CONFIG_ERR_CHK_DIS | XMAC_CONFIG_RX_CRC_CHK_DIS | XMAC_CONFIG_RESERVED_MULTICAST | XMAC_CONFIG_RX_CODEV_CHK_DIS | XMAC_CONFIG_ADDR_FILTER_EN | XMAC_CONFIG_RCV_PAUSE_ENABLE | XMAC_CONFIG_STRIP_CRC | XMAC_CONFIG_PASS_FLOW_CTRL | XMAC_CONFIG_MAC2IPP_PKT_CNT_EN); val |= (XMAC_CONFIG_HASH_FILTER_EN); nw64_mac(XMAC_CONFIG, val); nw64_mac(RXMAC_BT_CNT, 0); nw64_mac(RXMAC_BC_FRM_CNT, 0); nw64_mac(RXMAC_MC_FRM_CNT, 0); nw64_mac(RXMAC_FRAG_CNT, 0); nw64_mac(RXMAC_HIST_CNT1, 0); nw64_mac(RXMAC_HIST_CNT2, 0); nw64_mac(RXMAC_HIST_CNT3, 0); nw64_mac(RXMAC_HIST_CNT4, 0); nw64_mac(RXMAC_HIST_CNT5, 0); nw64_mac(RXMAC_HIST_CNT6, 0); nw64_mac(RXMAC_HIST_CNT7, 0); nw64_mac(RXMAC_MPSZER_CNT, 0); nw64_mac(RXMAC_CRC_ER_CNT, 0); nw64_mac(RXMAC_CD_VIO_CNT, 0); nw64_mac(LINK_FAULT_CNT, 0); } static void niu_init_rx_bmac(struct niu *np) { struct niu_parent *parent = np->parent; struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; int first_rdc_table = tp->first_table_num; unsigned long i; u64 val; nw64_mac(BMAC_ADD_FILT0, 0); nw64_mac(BMAC_ADD_FILT1, 0); nw64_mac(BMAC_ADD_FILT2, 0); nw64_mac(BMAC_ADD_FILT12_MASK, 0); nw64_mac(BMAC_ADD_FILT00_MASK, 0); for (i = 0; i < MAC_NUM_HASH; i++) nw64_mac(BMAC_HASH_TBL(i), 0); niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0); val = nr64_mac(BRXMAC_CONFIG); val &= ~(BRXMAC_CONFIG_ENABLE | BRXMAC_CONFIG_STRIP_PAD | BRXMAC_CONFIG_STRIP_FCS | BRXMAC_CONFIG_PROMISC | BRXMAC_CONFIG_PROMISC_GRP | BRXMAC_CONFIG_ADDR_FILT_EN | BRXMAC_CONFIG_DISCARD_DIS); val |= (BRXMAC_CONFIG_HASH_FILT_EN); nw64_mac(BRXMAC_CONFIG, val); val = nr64_mac(BMAC_ADDR_CMPEN); val |= 
BMAC_ADDR_CMPEN_EN0; nw64_mac(BMAC_ADDR_CMPEN, val); } static void niu_init_rx_mac(struct niu *np) { niu_set_primary_mac(np, np->dev->dev_addr); if (np->flags & NIU_FLAGS_XMAC) niu_init_rx_xmac(np); else niu_init_rx_bmac(np); } static void niu_enable_tx_xmac(struct niu *np, int on) { u64 val = nr64_mac(XMAC_CONFIG); if (on) val |= XMAC_CONFIG_TX_ENABLE; else val &= ~XMAC_CONFIG_TX_ENABLE; nw64_mac(XMAC_CONFIG, val); } static void niu_enable_tx_bmac(struct niu *np, int on) { u64 val = nr64_mac(BTXMAC_CONFIG); if (on) val |= BTXMAC_CONFIG_ENABLE; else val &= ~BTXMAC_CONFIG_ENABLE; nw64_mac(BTXMAC_CONFIG, val); } static void niu_enable_tx_mac(struct niu *np, int on) { if (np->flags & NIU_FLAGS_XMAC) niu_enable_tx_xmac(np, on); else niu_enable_tx_bmac(np, on); } static void niu_enable_rx_xmac(struct niu *np, int on) { u64 val = nr64_mac(XMAC_CONFIG); val &= ~(XMAC_CONFIG_HASH_FILTER_EN | XMAC_CONFIG_PROMISCUOUS); if (np->flags & NIU_FLAGS_MCAST) val |= XMAC_CONFIG_HASH_FILTER_EN; if (np->flags & NIU_FLAGS_PROMISC) val |= XMAC_CONFIG_PROMISCUOUS; if (on) val |= XMAC_CONFIG_RX_MAC_ENABLE; else val &= ~XMAC_CONFIG_RX_MAC_ENABLE; nw64_mac(XMAC_CONFIG, val); } static void niu_enable_rx_bmac(struct niu *np, int on) { u64 val = nr64_mac(BRXMAC_CONFIG); val &= ~(BRXMAC_CONFIG_HASH_FILT_EN | BRXMAC_CONFIG_PROMISC); if (np->flags & NIU_FLAGS_MCAST) val |= BRXMAC_CONFIG_HASH_FILT_EN; if (np->flags & NIU_FLAGS_PROMISC) val |= BRXMAC_CONFIG_PROMISC; if (on) val |= BRXMAC_CONFIG_ENABLE; else val &= ~BRXMAC_CONFIG_ENABLE; nw64_mac(BRXMAC_CONFIG, val); } static void niu_enable_rx_mac(struct niu *np, int on) { if (np->flags & NIU_FLAGS_XMAC) niu_enable_rx_xmac(np, on); else niu_enable_rx_bmac(np, on); } static int niu_init_mac(struct niu *np) { int err; niu_init_xif(np); err = niu_init_pcs(np); if (err) return err; err = niu_reset_tx_mac(np); if (err) return err; niu_init_tx_mac(np); err = niu_reset_rx_mac(np); if (err) return err; niu_init_rx_mac(np); /* This looks hookey but the RX MAC reset we just did will * undo some of the state we setup in niu_init_tx_mac() so we * have to call it again. In particular, the RX MAC reset will * set the XMAC_MAX register back to it's default value. 
*/ niu_init_tx_mac(np); niu_enable_tx_mac(np, 1); niu_enable_rx_mac(np, 1); return 0; } static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp) { (void) niu_tx_channel_stop(np, rp->tx_channel); } static void niu_stop_tx_channels(struct niu *np) { int i; for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; niu_stop_one_tx_channel(np, rp); } } static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp) { (void) niu_tx_channel_reset(np, rp->tx_channel); } static void niu_reset_tx_channels(struct niu *np) { int i; for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; niu_reset_one_tx_channel(np, rp); } } static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp) { (void) niu_enable_rx_channel(np, rp->rx_channel, 0); } static void niu_stop_rx_channels(struct niu *np) { int i; for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; niu_stop_one_rx_channel(np, rp); } } static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp) { int channel = rp->rx_channel; (void) niu_rx_channel_reset(np, channel); nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL); nw64(RX_DMA_CTL_STAT(channel), 0); (void) niu_enable_rx_channel(np, channel, 0); } static void niu_reset_rx_channels(struct niu *np) { int i; for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; niu_reset_one_rx_channel(np, rp); } } static void niu_disable_ipp(struct niu *np) { u64 rd, wr, val; int limit; rd = nr64_ipp(IPP_DFIFO_RD_PTR); wr = nr64_ipp(IPP_DFIFO_WR_PTR); limit = 100; while (--limit >= 0 && (rd != wr)) { rd = nr64_ipp(IPP_DFIFO_RD_PTR); wr = nr64_ipp(IPP_DFIFO_WR_PTR); } if (limit < 0 && (rd != 0 && wr != 1)) { netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n", (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR), (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR)); } val = nr64_ipp(IPP_CFIG); val &= ~(IPP_CFIG_IPP_ENABLE | IPP_CFIG_DFIFO_ECC_EN | IPP_CFIG_DROP_BAD_CRC | IPP_CFIG_CKSUM_EN); nw64_ipp(IPP_CFIG, val); (void) niu_ipp_reset(np); } static int niu_init_hw(struct niu *np) { int i, err; netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n"); niu_txc_enable_port(np, 1); niu_txc_port_dma_enable(np, 1); niu_txc_set_imask(np, 0); netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n"); for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; err = niu_init_one_tx_channel(np, rp); if (err) return err; } netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n"); err = niu_init_rx_channels(np); if (err) goto out_uninit_tx_channels; netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n"); err = niu_init_classifier_hw(np); if (err) goto out_uninit_rx_channels; netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n"); err = niu_init_zcp(np); if (err) goto out_uninit_rx_channels; netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n"); err = niu_init_ipp(np); if (err) goto out_uninit_rx_channels; netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n"); err = niu_init_mac(np); if (err) goto out_uninit_ipp; return 0; out_uninit_ipp: netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n"); niu_disable_ipp(np); out_uninit_rx_channels: netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n"); niu_stop_rx_channels(np); niu_reset_rx_channels(np); out_uninit_tx_channels: netif_printk(np, ifup, KERN_DEBUG, np->dev, 
"Uninit TX channels\n"); niu_stop_tx_channels(np); niu_reset_tx_channels(np); return err; } static void niu_stop_hw(struct niu *np) { netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n"); niu_enable_interrupts(np, 0); netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n"); niu_enable_rx_mac(np, 0); netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n"); niu_disable_ipp(np); netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n"); niu_stop_tx_channels(np); netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n"); niu_stop_rx_channels(np); netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n"); niu_reset_tx_channels(np); netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n"); niu_reset_rx_channels(np); } static void niu_set_irq_name(struct niu *np) { int port = np->port; int i, j = 1; sprintf(np->irq_name[0], "%s:MAC", np->dev->name); if (port == 0) { sprintf(np->irq_name[1], "%s:MIF", np->dev->name); sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name); j = 3; } for (i = 0; i < np->num_ldg - j; i++) { if (i < np->num_rx_rings) sprintf(np->irq_name[i+j], "%s-rx-%d", np->dev->name, i); else if (i < np->num_tx_rings + np->num_rx_rings) sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name, i - np->num_rx_rings); } } static int niu_request_irq(struct niu *np) { int i, j, err; niu_set_irq_name(np); err = 0; for (i = 0; i < np->num_ldg; i++) { struct niu_ldg *lp = &np->ldg[i]; err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED, np->irq_name[i], lp); if (err) goto out_free_irqs; } return 0; out_free_irqs: for (j = 0; j < i; j++) { struct niu_ldg *lp = &np->ldg[j]; free_irq(lp->irq, lp); } return err; } static void niu_free_irq(struct niu *np) { int i; for (i = 0; i < np->num_ldg; i++) { struct niu_ldg *lp = &np->ldg[i]; free_irq(lp->irq, lp); } } static void niu_enable_napi(struct niu *np) { int i; for (i = 0; i < np->num_ldg; i++) napi_enable(&np->ldg[i].napi); } static void niu_disable_napi(struct niu *np) { int i; for (i = 0; i < np->num_ldg; i++) napi_disable(&np->ldg[i].napi); } static int niu_open(struct net_device *dev) { struct niu *np = netdev_priv(dev); int err; netif_carrier_off(dev); err = niu_alloc_channels(np); if (err) goto out_err; err = niu_enable_interrupts(np, 0); if (err) goto out_free_channels; err = niu_request_irq(np); if (err) goto out_free_channels; niu_enable_napi(np); spin_lock_irq(&np->lock); err = niu_init_hw(np); if (!err) { timer_setup(&np->timer, niu_timer, 0); np->timer.expires = jiffies + HZ; err = niu_enable_interrupts(np, 1); if (err) niu_stop_hw(np); } spin_unlock_irq(&np->lock); if (err) { niu_disable_napi(np); goto out_free_irq; } netif_tx_start_all_queues(dev); if (np->link_config.loopback_mode != LOOPBACK_DISABLED) netif_carrier_on(dev); add_timer(&np->timer); return 0; out_free_irq: niu_free_irq(np); out_free_channels: niu_free_channels(np); out_err: return err; } static void niu_full_shutdown(struct niu *np, struct net_device *dev) { cancel_work_sync(&np->reset_task); niu_disable_napi(np); netif_tx_stop_all_queues(dev); del_timer_sync(&np->timer); spin_lock_irq(&np->lock); niu_stop_hw(np); spin_unlock_irq(&np->lock); } static int niu_close(struct net_device *dev) { struct niu *np = netdev_priv(dev); niu_full_shutdown(np, dev); niu_free_irq(np); niu_free_channels(np); niu_handle_led(np, 0); return 0; } static void niu_sync_xmac_stats(struct niu *np) { struct niu_xmac_stats *mp = &np->mac_stats.xmac; mp->tx_frames += nr64_mac(TXMAC_FRM_CNT); mp->tx_bytes += 
nr64_mac(TXMAC_BYTE_CNT); mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT); mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT); mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT); mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT); mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT); mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1); mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2); mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3); mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4); mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5); mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6); mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7); mp->rx_octets += nr64_mac(RXMAC_BT_CNT); mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT); mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT); mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT); } static void niu_sync_bmac_stats(struct niu *np) { struct niu_bmac_stats *mp = &np->mac_stats.bmac; mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT); mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT); mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT); mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT); } static void niu_sync_mac_stats(struct niu *np) { if (np->flags & NIU_FLAGS_XMAC) niu_sync_xmac_stats(np); else niu_sync_bmac_stats(np); } static void niu_get_rx_stats(struct niu *np, struct rtnl_link_stats64 *stats) { u64 pkts, dropped, errors, bytes; struct rx_ring_info *rx_rings; int i; pkts = dropped = errors = bytes = 0; rx_rings = READ_ONCE(np->rx_rings); if (!rx_rings) goto no_rings; for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &rx_rings[i]; niu_sync_rx_discard_stats(np, rp, 0); pkts += rp->rx_packets; bytes += rp->rx_bytes; dropped += rp->rx_dropped; errors += rp->rx_errors; } no_rings: stats->rx_packets = pkts; stats->rx_bytes = bytes; stats->rx_dropped = dropped; stats->rx_errors = errors; } static void niu_get_tx_stats(struct niu *np, struct rtnl_link_stats64 *stats) { u64 pkts, errors, bytes; struct tx_ring_info *tx_rings; int i; pkts = errors = bytes = 0; tx_rings = READ_ONCE(np->tx_rings); if (!tx_rings) goto no_rings; for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &tx_rings[i]; pkts += rp->tx_packets; bytes += rp->tx_bytes; errors += rp->tx_errors; } no_rings: stats->tx_packets = pkts; stats->tx_bytes = bytes; stats->tx_errors = errors; } static void niu_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct niu *np = netdev_priv(dev); if (netif_running(dev)) { niu_get_rx_stats(np, stats); niu_get_tx_stats(np, stats); } } static void niu_load_hash_xmac(struct niu *np, u16 *hash) { int i; for (i = 0; i < 16; i++) nw64_mac(XMAC_HASH_TBL(i), hash[i]); } static void niu_load_hash_bmac(struct niu *np, u16 *hash) { int i; for (i = 0; i < 16; i++) nw64_mac(BMAC_HASH_TBL(i), hash[i]); } static void niu_load_hash(struct niu *np, u16 *hash) { if (np->flags & NIU_FLAGS_XMAC) niu_load_hash_xmac(np, hash); else niu_load_hash_bmac(np, hash); } static void niu_set_rx_mode(struct net_device *dev) { struct niu *np = netdev_priv(dev); int i, alt_cnt, err; struct netdev_hw_addr *ha; unsigned long flags; u16 hash[16] = { 0, }; spin_lock_irqsave(&np->lock, flags); niu_enable_rx_mac(np, 0); np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC); if (dev->flags & IFF_PROMISC) np->flags |= NIU_FLAGS_PROMISC; if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev))) np->flags |= NIU_FLAGS_MCAST; alt_cnt = netdev_uc_count(dev); if (alt_cnt > niu_num_alt_addr(np)) { alt_cnt = 0; 
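		/* More unicast addresses than alternate MAC slots are available:
		 * give up on per-address filtering and fall back to promiscuous
		 * mode instead.
		 */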
np->flags |= NIU_FLAGS_PROMISC; } if (alt_cnt) { int index = 0; netdev_for_each_uc_addr(ha, dev) { err = niu_set_alt_mac(np, index, ha->addr); if (err) netdev_warn(dev, "Error %d adding alt mac %d\n", err, index); err = niu_enable_alt_mac(np, index, 1); if (err) netdev_warn(dev, "Error %d enabling alt mac %d\n", err, index); index++; } } else { int alt_start; if (np->flags & NIU_FLAGS_XMAC) alt_start = 0; else alt_start = 1; for (i = alt_start; i < niu_num_alt_addr(np); i++) { err = niu_enable_alt_mac(np, i, 0); if (err) netdev_warn(dev, "Error %d disabling alt mac %d\n", err, i); } } if (dev->flags & IFF_ALLMULTI) { for (i = 0; i < 16; i++) hash[i] = 0xffff; } else if (!netdev_mc_empty(dev)) { netdev_for_each_mc_addr(ha, dev) { u32 crc = ether_crc_le(ETH_ALEN, ha->addr); crc >>= 24; hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); } } if (np->flags & NIU_FLAGS_MCAST) niu_load_hash(np, hash); niu_enable_rx_mac(np, 1); spin_unlock_irqrestore(&np->lock, flags); } static int niu_set_mac_addr(struct net_device *dev, void *p) { struct niu *np = netdev_priv(dev); struct sockaddr *addr = p; unsigned long flags; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(dev, addr->sa_data); if (!netif_running(dev)) return 0; spin_lock_irqsave(&np->lock, flags); niu_enable_rx_mac(np, 0); niu_set_primary_mac(np, dev->dev_addr); niu_enable_rx_mac(np, 1); spin_unlock_irqrestore(&np->lock, flags); return 0; } static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { return -EOPNOTSUPP; } static void niu_netif_stop(struct niu *np) { netif_trans_update(np->dev); /* prevent tx timeout */ niu_disable_napi(np); netif_tx_disable(np->dev); } static void niu_netif_start(struct niu *np) { /* NOTE: unconditional netif_wake_queue is only appropriate * so long as all callers are assured to have free tx slots * (such as after niu_init_hw). 
*/ netif_tx_wake_all_queues(np->dev); niu_enable_napi(np); niu_enable_interrupts(np, 1); } static void niu_reset_buffers(struct niu *np) { int i, j, k, err; if (np->rx_rings) { for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) { struct page *page; page = rp->rxhash[j]; while (page) { struct page *next = niu_next_page(page); u64 base = page->index; base = base >> RBR_DESCR_ADDR_SHIFT; rp->rbr[k++] = cpu_to_le32(base); page = next; } } for (; k < MAX_RBR_RING_SIZE; k++) { err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k); if (unlikely(err)) break; } rp->rbr_index = rp->rbr_table_size - 1; rp->rcr_index = 0; rp->rbr_pending = 0; rp->rbr_refill_pending = 0; } } if (np->tx_rings) { for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; for (j = 0; j < MAX_TX_RING_SIZE; j++) { if (rp->tx_buffs[j].skb) (void) release_tx_packet(np, rp, j); } rp->pending = MAX_TX_RING_SIZE; rp->prod = 0; rp->cons = 0; rp->wrap_bit = 0; } } } static void niu_reset_task(struct work_struct *work) { struct niu *np = container_of(work, struct niu, reset_task); unsigned long flags; int err; spin_lock_irqsave(&np->lock, flags); if (!netif_running(np->dev)) { spin_unlock_irqrestore(&np->lock, flags); return; } spin_unlock_irqrestore(&np->lock, flags); del_timer_sync(&np->timer); niu_netif_stop(np); spin_lock_irqsave(&np->lock, flags); niu_stop_hw(np); spin_unlock_irqrestore(&np->lock, flags); niu_reset_buffers(np); spin_lock_irqsave(&np->lock, flags); err = niu_init_hw(np); if (!err) { np->timer.expires = jiffies + HZ; add_timer(&np->timer); niu_netif_start(np); } spin_unlock_irqrestore(&np->lock, flags); } static void niu_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct niu *np = netdev_priv(dev); dev_err(np->device, "%s: Transmit timed out, resetting\n", dev->name); schedule_work(&np->reset_task); } static void niu_set_txd(struct tx_ring_info *rp, int index, u64 mapping, u64 len, u64 mark, u64 n_frags) { __le64 *desc = &rp->descr[index]; *desc = cpu_to_le64(mark | (n_frags << TX_DESC_NUM_PTR_SHIFT) | (len << TX_DESC_TR_LEN_SHIFT) | (mapping & TX_DESC_SAD)); } static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, u64 pad_bytes, u64 len) { u16 eth_proto, eth_proto_inner; u64 csum_bits, l3off, ihl, ret; u8 ip_proto; int ipv6; eth_proto = be16_to_cpu(ehdr->h_proto); eth_proto_inner = eth_proto; if (eth_proto == ETH_P_8021Q) { struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr; __be16 val = vp->h_vlan_encapsulated_proto; eth_proto_inner = be16_to_cpu(val); } ipv6 = ihl = 0; switch (skb->protocol) { case cpu_to_be16(ETH_P_IP): ip_proto = ip_hdr(skb)->protocol; ihl = ip_hdr(skb)->ihl; break; case cpu_to_be16(ETH_P_IPV6): ip_proto = ipv6_hdr(skb)->nexthdr; ihl = (40 >> 2); ipv6 = 1; break; default: ip_proto = ihl = 0; break; } csum_bits = TXHDR_CSUM_NONE; if (skb->ip_summed == CHECKSUM_PARTIAL) { u64 start, stuff; csum_bits = (ip_proto == IPPROTO_TCP ? TXHDR_CSUM_TCP : (ip_proto == IPPROTO_UDP ? 
TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP)); start = skb_checksum_start_offset(skb) - (pad_bytes + sizeof(struct tx_pkt_hdr)); stuff = start + skb->csum_offset; csum_bits |= (start / 2) << TXHDR_L4START_SHIFT; csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT; } l3off = skb_network_offset(skb) - (pad_bytes + sizeof(struct tx_pkt_hdr)); ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) | (len << TXHDR_LEN_SHIFT) | ((l3off / 2) << TXHDR_L3START_SHIFT) | (ihl << TXHDR_IHL_SHIFT) | ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) | ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) | (ipv6 ? TXHDR_IP_VER : 0) | csum_bits); return ret; } static netdev_tx_t niu_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct niu *np = netdev_priv(dev); unsigned long align, headroom; struct netdev_queue *txq; struct tx_ring_info *rp; struct tx_pkt_hdr *tp; unsigned int len, nfg; struct ethhdr *ehdr; int prod, i, tlen; u64 mapping, mrk; i = skb_get_queue_mapping(skb); rp = &np->tx_rings[i]; txq = netdev_get_tx_queue(dev, i); if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) { netif_tx_stop_queue(txq); dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name); rp->tx_errors++; return NETDEV_TX_BUSY; } if (eth_skb_pad(skb)) goto out; len = sizeof(struct tx_pkt_hdr) + 15; if (skb_headroom(skb) < len) { struct sk_buff *skb_new; skb_new = skb_realloc_headroom(skb, len); if (!skb_new) goto out_drop; kfree_skb(skb); skb = skb_new; } else skb_orphan(skb); align = ((unsigned long) skb->data & (16 - 1)); headroom = align + sizeof(struct tx_pkt_hdr); ehdr = (struct ethhdr *) skb->data; tp = skb_push(skb, headroom); len = skb->len - sizeof(struct tx_pkt_hdr); tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len)); tp->resv = 0; len = skb_headlen(skb); mapping = np->ops->map_single(np->device, skb->data, len, DMA_TO_DEVICE); prod = rp->prod; rp->tx_buffs[prod].skb = skb; rp->tx_buffs[prod].mapping = mapping; mrk = TX_DESC_SOP; if (++rp->mark_counter == rp->mark_freq) { rp->mark_counter = 0; mrk |= TX_DESC_MARK; rp->mark_pending++; } tlen = len; nfg = skb_shinfo(skb)->nr_frags; while (tlen > 0) { tlen -= MAX_TX_DESC_LEN; nfg++; } while (len > 0) { unsigned int this_len = len; if (this_len > MAX_TX_DESC_LEN) this_len = MAX_TX_DESC_LEN; niu_set_txd(rp, prod, mapping, this_len, mrk, nfg); mrk = nfg = 0; prod = NEXT_TX(rp, prod); mapping += this_len; len -= this_len; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; len = skb_frag_size(frag); mapping = np->ops->map_page(np->device, skb_frag_page(frag), skb_frag_off(frag), len, DMA_TO_DEVICE); rp->tx_buffs[prod].skb = NULL; rp->tx_buffs[prod].mapping = mapping; niu_set_txd(rp, prod, mapping, len, 0, 0); prod = NEXT_TX(rp, prod); } if (prod < rp->prod) rp->wrap_bit ^= TX_RING_KICK_WRAP; rp->prod = prod; nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) { netif_tx_stop_queue(txq); if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)) netif_tx_wake_queue(txq); } out: return NETDEV_TX_OK; out_drop: rp->tx_errors++; kfree_skb(skb); goto out; } static int niu_change_mtu(struct net_device *dev, int new_mtu) { struct niu *np = netdev_priv(dev); int err, orig_jumbo, new_jumbo; orig_jumbo = (dev->mtu > ETH_DATA_LEN); new_jumbo = (new_mtu > ETH_DATA_LEN); dev->mtu = new_mtu; if (!netif_running(dev) || (orig_jumbo == new_jumbo)) return 0; niu_full_shutdown(np, dev); niu_free_channels(np); niu_enable_napi(np); err = niu_alloc_channels(np); 
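/* Only an MTU change that crosses the jumbo-frame boundary on a
 * running interface reaches this point; the re-init sequence below
 * mirrors niu_open(): bring the hardware back up under np->lock,
 * rearm the periodic niu_timer, and restart the TX queues only
 * after niu_init_hw() has succeeded.
 */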
if (err) return err; spin_lock_irq(&np->lock); err = niu_init_hw(np); if (!err) { timer_setup(&np->timer, niu_timer, 0); np->timer.expires = jiffies + HZ; err = niu_enable_interrupts(np, 1); if (err) niu_stop_hw(np); } spin_unlock_irq(&np->lock); if (!err) { netif_tx_start_all_queues(dev); if (np->link_config.loopback_mode != LOOPBACK_DISABLED) netif_carrier_on(dev); add_timer(&np->timer); } return err; } static void niu_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct niu *np = netdev_priv(dev); struct niu_vpd *vpd = &np->vpd; strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d", vpd->fcode_major, vpd->fcode_minor); if (np->parent->plat_type != PLAT_TYPE_NIU) strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); } static int niu_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct niu *np = netdev_priv(dev); struct niu_link_config *lp; lp = &np->link_config; memset(cmd, 0, sizeof(*cmd)); cmd->base.phy_address = np->phy_addr; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, lp->supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, lp->active_advertising); cmd->base.autoneg = lp->active_autoneg; cmd->base.speed = lp->active_speed; cmd->base.duplex = lp->active_duplex; cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP; return 0; } static int niu_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct niu *np = netdev_priv(dev); struct niu_link_config *lp = &np->link_config; ethtool_convert_link_mode_to_legacy_u32(&lp->advertising, cmd->link_modes.advertising); lp->speed = cmd->base.speed; lp->duplex = cmd->base.duplex; lp->autoneg = cmd->base.autoneg; return niu_init_link(np); } static u32 niu_get_msglevel(struct net_device *dev) { struct niu *np = netdev_priv(dev); return np->msg_enable; } static void niu_set_msglevel(struct net_device *dev, u32 value) { struct niu *np = netdev_priv(dev); np->msg_enable = value; } static int niu_nway_reset(struct net_device *dev) { struct niu *np = netdev_priv(dev); if (np->link_config.autoneg) return niu_init_link(np); return 0; } static int niu_get_eeprom_len(struct net_device *dev) { struct niu *np = netdev_priv(dev); return np->eeprom_len; } static int niu_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { struct niu *np = netdev_priv(dev); u32 offset, len, val; offset = eeprom->offset; len = eeprom->len; if (offset + len < offset) return -EINVAL; if (offset >= np->eeprom_len) return -EINVAL; if (offset + len > np->eeprom_len) len = eeprom->len = np->eeprom_len - offset; if (offset & 3) { u32 b_offset, b_count; b_offset = offset & 3; b_count = 4 - b_offset; if (b_count > len) b_count = len; val = nr64(ESPC_NCR((offset - b_offset) / 4)); memcpy(data, ((char *)&val) + b_offset, b_count); data += b_count; len -= b_count; offset += b_count; } while (len >= 4) { val = nr64(ESPC_NCR(offset / 4)); memcpy(data, &val, 4); data += 4; len -= 4; offset += 4; } if (len) { val = nr64(ESPC_NCR(offset / 4)); memcpy(data, &val, len); } return 0; } static void niu_ethflow_to_l3proto(int flow_type, u8 *pid) { switch (flow_type) { case TCP_V4_FLOW: case TCP_V6_FLOW: *pid = IPPROTO_TCP; break; case UDP_V4_FLOW: case UDP_V6_FLOW: *pid = IPPROTO_UDP; break; case SCTP_V4_FLOW: case SCTP_V6_FLOW: *pid = IPPROTO_SCTP; break; case AH_V4_FLOW: 
case AH_V6_FLOW: *pid = IPPROTO_AH; break; case ESP_V4_FLOW: case ESP_V6_FLOW: *pid = IPPROTO_ESP; break; default: *pid = 0; break; } } static int niu_class_to_ethflow(u64 class, int *flow_type) { switch (class) { case CLASS_CODE_TCP_IPV4: *flow_type = TCP_V4_FLOW; break; case CLASS_CODE_UDP_IPV4: *flow_type = UDP_V4_FLOW; break; case CLASS_CODE_AH_ESP_IPV4: *flow_type = AH_V4_FLOW; break; case CLASS_CODE_SCTP_IPV4: *flow_type = SCTP_V4_FLOW; break; case CLASS_CODE_TCP_IPV6: *flow_type = TCP_V6_FLOW; break; case CLASS_CODE_UDP_IPV6: *flow_type = UDP_V6_FLOW; break; case CLASS_CODE_AH_ESP_IPV6: *flow_type = AH_V6_FLOW; break; case CLASS_CODE_SCTP_IPV6: *flow_type = SCTP_V6_FLOW; break; case CLASS_CODE_USER_PROG1: case CLASS_CODE_USER_PROG2: case CLASS_CODE_USER_PROG3: case CLASS_CODE_USER_PROG4: *flow_type = IP_USER_FLOW; break; default: return -EINVAL; } return 0; } static int niu_ethflow_to_class(int flow_type, u64 *class) { switch (flow_type) { case TCP_V4_FLOW: *class = CLASS_CODE_TCP_IPV4; break; case UDP_V4_FLOW: *class = CLASS_CODE_UDP_IPV4; break; case AH_ESP_V4_FLOW: case AH_V4_FLOW: case ESP_V4_FLOW: *class = CLASS_CODE_AH_ESP_IPV4; break; case SCTP_V4_FLOW: *class = CLASS_CODE_SCTP_IPV4; break; case TCP_V6_FLOW: *class = CLASS_CODE_TCP_IPV6; break; case UDP_V6_FLOW: *class = CLASS_CODE_UDP_IPV6; break; case AH_ESP_V6_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: *class = CLASS_CODE_AH_ESP_IPV6; break; case SCTP_V6_FLOW: *class = CLASS_CODE_SCTP_IPV6; break; default: return 0; } return 1; } static u64 niu_flowkey_to_ethflow(u64 flow_key) { u64 ethflow = 0; if (flow_key & FLOW_KEY_L2DA) ethflow |= RXH_L2DA; if (flow_key & FLOW_KEY_VLAN) ethflow |= RXH_VLAN; if (flow_key & FLOW_KEY_IPSA) ethflow |= RXH_IP_SRC; if (flow_key & FLOW_KEY_IPDA) ethflow |= RXH_IP_DST; if (flow_key & FLOW_KEY_PROTO) ethflow |= RXH_L3_PROTO; if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT)) ethflow |= RXH_L4_B_0_1; if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT)) ethflow |= RXH_L4_B_2_3; return ethflow; } static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key) { u64 key = 0; if (ethflow & RXH_L2DA) key |= FLOW_KEY_L2DA; if (ethflow & RXH_VLAN) key |= FLOW_KEY_VLAN; if (ethflow & RXH_IP_SRC) key |= FLOW_KEY_IPSA; if (ethflow & RXH_IP_DST) key |= FLOW_KEY_IPDA; if (ethflow & RXH_L3_PROTO) key |= FLOW_KEY_PROTO; if (ethflow & RXH_L4_B_0_1) key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT); if (ethflow & RXH_L4_B_2_3) key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT); *flow_key = key; return 1; } static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) { u64 class; nfc->data = 0; if (!niu_ethflow_to_class(nfc->flow_type, &class)) return -EINVAL; if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & TCAM_KEY_DISC) nfc->data = RXH_DISCARD; else nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class - CLASS_CODE_USER_PROG1]); return 0; } static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp, struct ethtool_rx_flow_spec *fsp) { u32 tmp; u16 prt; tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & 
TCAM_V4KEY2_TOS) >> TCAM_V4KEY2_TOS_SHIFT; fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >> TCAM_V4KEY2_TOS_SHIFT; switch (fsp->flow_type) { case TCP_V4_FLOW: case UDP_V4_FLOW: case SCTP_V4_FLOW: prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); break; case AH_V4_FLOW: case ESP_V4_FLOW: tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> TCAM_V4KEY2_PORT_SPI_SHIFT; fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp); tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> TCAM_V4KEY2_PORT_SPI_SHIFT; fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp); break; case IP_USER_FLOW: tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> TCAM_V4KEY2_PORT_SPI_SHIFT; fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> TCAM_V4KEY2_PORT_SPI_SHIFT; fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); fsp->h_u.usr_ip4_spec.proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >> TCAM_V4KEY2_PROTO_SHIFT; fsp->m_u.usr_ip4_spec.proto = (tp->key_mask[2] & TCAM_V4KEY2_PROTO) >> TCAM_V4KEY2_PROTO_SHIFT; fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; break; default: break; } } static int niu_get_ethtool_tcam_entry(struct niu *np, struct ethtool_rxnfc *nfc) { struct niu_parent *parent = np->parent; struct niu_tcam_entry *tp; struct ethtool_rx_flow_spec *fsp = &nfc->fs; u16 idx; u64 class; int ret = 0; idx = tcam_get_index(np, (u16)nfc->fs.location); tp = &parent->tcam[idx]; if (!tp->valid) { netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n", parent->index, (u16)nfc->fs.location, idx); return -EINVAL; } /* fill the flow spec entry */ class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> TCAM_V4KEY0_CLASS_CODE_SHIFT; ret = niu_class_to_ethflow(class, &fsp->flow_type); if (ret < 0) { netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", parent->index); goto out; } if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) { u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >> TCAM_V4KEY2_PROTO_SHIFT; if (proto == IPPROTO_ESP) { if (fsp->flow_type == AH_V4_FLOW) fsp->flow_type = ESP_V4_FLOW; else fsp->flow_type = ESP_V6_FLOW; } } switch (fsp->flow_type) { case TCP_V4_FLOW: case UDP_V4_FLOW: case SCTP_V4_FLOW: case AH_V4_FLOW: case ESP_V4_FLOW: niu_get_ip4fs_from_tcam_key(tp, fsp); break; case TCP_V6_FLOW: case UDP_V6_FLOW: case SCTP_V6_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: /* Not yet implemented */ ret = -EINVAL; break; case IP_USER_FLOW: niu_get_ip4fs_from_tcam_key(tp, fsp); break; default: ret = -EINVAL; break; } if (ret < 0) goto out; if (tp->assoc_data & TCAM_ASSOCDATA_DISC) fsp->ring_cookie = RX_CLS_FLOW_DISC; else fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >> TCAM_ASSOCDATA_OFFSET_SHIFT; /* put the tcam size here */ nfc->data = tcam_get_size(np); out: return ret; } static int niu_get_ethtool_tcam_all(struct niu *np, struct ethtool_rxnfc *nfc, u32 *rule_locs) { struct niu_parent *parent = np->parent; struct niu_tcam_entry *tp; int i, idx, cnt; unsigned long flags; int ret = 0; /* put the tcam size here */ nfc->data = tcam_get_size(np); niu_lock_parent(np, flags); for (cnt = 0, i = 0; i < 
nfc->data; i++) { idx = tcam_get_index(np, i); tp = &parent->tcam[idx]; if (!tp->valid) continue; if (cnt == nfc->rule_cnt) { ret = -EMSGSIZE; break; } rule_locs[cnt] = i; cnt++; } niu_unlock_parent(np, flags); nfc->rule_cnt = cnt; return ret; } static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct niu *np = netdev_priv(dev); int ret = 0; switch (cmd->cmd) { case ETHTOOL_GRXFH: ret = niu_get_hash_opts(np, cmd); break; case ETHTOOL_GRXRINGS: cmd->data = np->num_rx_rings; break; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = tcam_get_valid_entry_cnt(np); break; case ETHTOOL_GRXCLSRULE: ret = niu_get_ethtool_tcam_entry(np, cmd); break; case ETHTOOL_GRXCLSRLALL: ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs); break; default: ret = -EINVAL; break; } return ret; } static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) { u64 class; u64 flow_key = 0; unsigned long flags; if (!niu_ethflow_to_class(nfc->flow_type, &class)) return -EINVAL; if (class < CLASS_CODE_USER_PROG1 || class > CLASS_CODE_SCTP_IPV6) return -EINVAL; if (nfc->data & RXH_DISCARD) { niu_lock_parent(np, flags); flow_key = np->parent->tcam_key[class - CLASS_CODE_USER_PROG1]; flow_key |= TCAM_KEY_DISC; nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key); np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key; niu_unlock_parent(np, flags); return 0; } else { /* Discard was set before, but is not set now */ if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & TCAM_KEY_DISC) { niu_lock_parent(np, flags); flow_key = np->parent->tcam_key[class - CLASS_CODE_USER_PROG1]; flow_key &= ~TCAM_KEY_DISC; nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key); np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key; niu_unlock_parent(np, flags); } } if (!niu_ethflow_to_flowkey(nfc->data, &flow_key)) return -EINVAL; niu_lock_parent(np, flags); nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key); np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key; niu_unlock_parent(np, flags); return 0; } static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp, struct niu_tcam_entry *tp, int l2_rdc_tab, u64 class) { u8 pid = 0; u32 sip, dip, sipm, dipm, spi, spim; u16 sport, dport, spm, dpm; sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src); sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src); dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst); dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst); tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT; tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE; tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT; tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM; tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT; tp->key[3] |= dip; tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT; tp->key_mask[3] |= dipm; tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos << TCAM_V4KEY2_TOS_SHIFT); tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos << TCAM_V4KEY2_TOS_SHIFT); switch (fsp->flow_type) { case TCP_V4_FLOW: case UDP_V4_FLOW: case SCTP_V4_FLOW: sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc); spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc); dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst); dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst); tp->key[2] |= (((u64)sport << 16) | dport); tp->key_mask[2] |= (((u64)spm << 16) | dpm); niu_ethflow_to_l3proto(fsp->flow_type, &pid); break; case AH_V4_FLOW: case ESP_V4_FLOW: spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi); spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi); tp->key[2] |= spi; tp->key_mask[2] |= spim; 
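/* For AH/ESP flows the 32-bit SPI occupies the same key[2] bits
 * (TCAM_V4KEY2_PORT_SPI) that carry the 16-bit source and
 * destination ports in the TCP/UDP/SCTP case above.
 */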
niu_ethflow_to_l3proto(fsp->flow_type, &pid); break; case IP_USER_FLOW: spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes); spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes); tp->key[2] |= spi; tp->key_mask[2] |= spim; pid = fsp->h_u.usr_ip4_spec.proto; break; default: break; } tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT); if (pid) { tp->key_mask[2] |= TCAM_V4KEY2_PROTO; } } static int niu_add_ethtool_tcam_entry(struct niu *np, struct ethtool_rxnfc *nfc) { struct niu_parent *parent = np->parent; struct niu_tcam_entry *tp; struct ethtool_rx_flow_spec *fsp = &nfc->fs; struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port]; int l2_rdc_table = rdc_table->first_table_num; u16 idx; u64 class; unsigned long flags; int err, ret; ret = 0; idx = nfc->fs.location; if (idx >= tcam_get_size(np)) return -EINVAL; if (fsp->flow_type == IP_USER_FLOW) { int i; int add_usr_cls = 0; struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec; struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec; if (uspec->ip_ver != ETH_RX_NFC_IP4) return -EINVAL; niu_lock_parent(np, flags); for (i = 0; i < NIU_L3_PROG_CLS; i++) { if (parent->l3_cls[i]) { if (uspec->proto == parent->l3_cls_pid[i]) { class = parent->l3_cls[i]; parent->l3_cls_refcnt[i]++; add_usr_cls = 1; break; } } else { /* Program new user IP class */ switch (i) { case 0: class = CLASS_CODE_USER_PROG1; break; case 1: class = CLASS_CODE_USER_PROG2; break; case 2: class = CLASS_CODE_USER_PROG3; break; case 3: class = CLASS_CODE_USER_PROG4; break; default: class = CLASS_CODE_UNRECOG; break; } ret = tcam_user_ip_class_set(np, class, 0, uspec->proto, uspec->tos, umask->tos); if (ret) goto out; ret = tcam_user_ip_class_enable(np, class, 1); if (ret) goto out; parent->l3_cls[i] = class; parent->l3_cls_pid[i] = uspec->proto; parent->l3_cls_refcnt[i]++; add_usr_cls = 1; break; } } if (!add_usr_cls) { netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n", parent->index, __func__, uspec->proto); ret = -EINVAL; goto out; } niu_unlock_parent(np, flags); } else { if (!niu_ethflow_to_class(fsp->flow_type, &class)) { return -EINVAL; } } niu_lock_parent(np, flags); idx = tcam_get_index(np, idx); tp = &parent->tcam[idx]; memset(tp, 0, sizeof(*tp)); /* fill in the tcam key and mask */ switch (fsp->flow_type) { case TCP_V4_FLOW: case UDP_V4_FLOW: case SCTP_V4_FLOW: case AH_V4_FLOW: case ESP_V4_FLOW: niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class); break; case TCP_V6_FLOW: case UDP_V6_FLOW: case SCTP_V6_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: /* Not yet implemented */ netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n", parent->index, __func__, fsp->flow_type); ret = -EINVAL; goto out; case IP_USER_FLOW: niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class); break; default: netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n", parent->index, __func__, fsp->flow_type); ret = -EINVAL; goto out; } /* fill in the assoc data */ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { tp->assoc_data = TCAM_ASSOCDATA_DISC; } else { if (fsp->ring_cookie >= np->num_rx_rings) { netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n", parent->index, __func__, (long long)fsp->ring_cookie); ret = -EINVAL; goto out; } tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | (fsp->ring_cookie << TCAM_ASSOCDATA_OFFSET_SHIFT)); } err = tcam_write(np, idx, tp->key, tp->key_mask); if (err) { ret = -EINVAL; goto out; } err = tcam_assoc_write(np, idx, tp->assoc_data); if (err) { ret = -EINVAL; goto out; } /* validate 
the entry */ tp->valid = 1; np->clas.tcam_valid_entries++; out: niu_unlock_parent(np, flags); return ret; } static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc) { struct niu_parent *parent = np->parent; struct niu_tcam_entry *tp; u16 idx; unsigned long flags; u64 class; int ret = 0; if (loc >= tcam_get_size(np)) return -EINVAL; niu_lock_parent(np, flags); idx = tcam_get_index(np, loc); tp = &parent->tcam[idx]; /* if the entry is of a user defined class, then update*/ class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> TCAM_V4KEY0_CLASS_CODE_SHIFT; if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) { int i; for (i = 0; i < NIU_L3_PROG_CLS; i++) { if (parent->l3_cls[i] == class) { parent->l3_cls_refcnt[i]--; if (!parent->l3_cls_refcnt[i]) { /* disable class */ ret = tcam_user_ip_class_enable(np, class, 0); if (ret) goto out; parent->l3_cls[i] = 0; parent->l3_cls_pid[i] = 0; } break; } } if (i == NIU_L3_PROG_CLS) { netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n", parent->index, __func__, (unsigned long long)class); ret = -EINVAL; goto out; } } ret = tcam_flush(np, idx); if (ret) goto out; /* invalidate the entry */ tp->valid = 0; np->clas.tcam_valid_entries--; out: niu_unlock_parent(np, flags); return ret; } static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) { struct niu *np = netdev_priv(dev); int ret = 0; switch (cmd->cmd) { case ETHTOOL_SRXFH: ret = niu_set_hash_opts(np, cmd); break; case ETHTOOL_SRXCLSRLINS: ret = niu_add_ethtool_tcam_entry(np, cmd); break; case ETHTOOL_SRXCLSRLDEL: ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location); break; default: ret = -EINVAL; break; } return ret; } static const struct { const char string[ETH_GSTRING_LEN]; } niu_xmac_stat_keys[] = { { "tx_frames" }, { "tx_bytes" }, { "tx_fifo_errors" }, { "tx_overflow_errors" }, { "tx_max_pkt_size_errors" }, { "tx_underflow_errors" }, { "rx_local_faults" }, { "rx_remote_faults" }, { "rx_link_faults" }, { "rx_align_errors" }, { "rx_frags" }, { "rx_mcasts" }, { "rx_bcasts" }, { "rx_hist_cnt1" }, { "rx_hist_cnt2" }, { "rx_hist_cnt3" }, { "rx_hist_cnt4" }, { "rx_hist_cnt5" }, { "rx_hist_cnt6" }, { "rx_hist_cnt7" }, { "rx_octets" }, { "rx_code_violations" }, { "rx_len_errors" }, { "rx_crc_errors" }, { "rx_underflows" }, { "rx_overflows" }, { "pause_off_state" }, { "pause_on_state" }, { "pause_received" }, }; #define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys) static const struct { const char string[ETH_GSTRING_LEN]; } niu_bmac_stat_keys[] = { { "tx_underflow_errors" }, { "tx_max_pkt_size_errors" }, { "tx_bytes" }, { "tx_frames" }, { "rx_overflows" }, { "rx_frames" }, { "rx_align_errors" }, { "rx_crc_errors" }, { "rx_len_errors" }, { "pause_off_state" }, { "pause_on_state" }, { "pause_received" }, }; #define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys) static const struct { const char string[ETH_GSTRING_LEN]; } niu_rxchan_stat_keys[] = { { "rx_channel" }, { "rx_packets" }, { "rx_bytes" }, { "rx_dropped" }, { "rx_errors" }, }; #define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys) static const struct { const char string[ETH_GSTRING_LEN]; } niu_txchan_stat_keys[] = { { "tx_channel" }, { "tx_packets" }, { "tx_bytes" }, { "tx_errors" }, }; #define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys) static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct niu *np = netdev_priv(dev); int i; if (stringset != ETH_SS_STATS) return; if (np->flags & NIU_FLAGS_XMAC) { memcpy(data, niu_xmac_stat_keys, 
sizeof(niu_xmac_stat_keys)); data += sizeof(niu_xmac_stat_keys); } else { memcpy(data, niu_bmac_stat_keys, sizeof(niu_bmac_stat_keys)); data += sizeof(niu_bmac_stat_keys); } for (i = 0; i < np->num_rx_rings; i++) { memcpy(data, niu_rxchan_stat_keys, sizeof(niu_rxchan_stat_keys)); data += sizeof(niu_rxchan_stat_keys); } for (i = 0; i < np->num_tx_rings; i++) { memcpy(data, niu_txchan_stat_keys, sizeof(niu_txchan_stat_keys)); data += sizeof(niu_txchan_stat_keys); } } static int niu_get_sset_count(struct net_device *dev, int stringset) { struct niu *np = netdev_priv(dev); if (stringset != ETH_SS_STATS) return -EINVAL; return (np->flags & NIU_FLAGS_XMAC ? NUM_XMAC_STAT_KEYS : NUM_BMAC_STAT_KEYS) + (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS); } static void niu_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct niu *np = netdev_priv(dev); int i; niu_sync_mac_stats(np); if (np->flags & NIU_FLAGS_XMAC) { memcpy(data, &np->mac_stats.xmac, sizeof(struct niu_xmac_stats)); data += (sizeof(struct niu_xmac_stats) / sizeof(u64)); } else { memcpy(data, &np->mac_stats.bmac, sizeof(struct niu_bmac_stats)); data += (sizeof(struct niu_bmac_stats) / sizeof(u64)); } for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; niu_sync_rx_discard_stats(np, rp, 0); data[0] = rp->rx_channel; data[1] = rp->rx_packets; data[2] = rp->rx_bytes; data[3] = rp->rx_dropped; data[4] = rp->rx_errors; data += 5; } for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; data[0] = rp->tx_channel; data[1] = rp->tx_packets; data[2] = rp->tx_bytes; data[3] = rp->tx_errors; data += 4; } } static u64 niu_led_state_save(struct niu *np) { if (np->flags & NIU_FLAGS_XMAC) return nr64_mac(XMAC_CONFIG); else return nr64_mac(BMAC_XIF_CONFIG); } static void niu_led_state_restore(struct niu *np, u64 val) { if (np->flags & NIU_FLAGS_XMAC) nw64_mac(XMAC_CONFIG, val); else nw64_mac(BMAC_XIF_CONFIG, val); } static void niu_force_led(struct niu *np, int on) { u64 val, reg, bit; if (np->flags & NIU_FLAGS_XMAC) { reg = XMAC_CONFIG; bit = XMAC_CONFIG_FORCE_LED_ON; } else { reg = BMAC_XIF_CONFIG; bit = BMAC_XIF_CONFIG_LINK_LED; } val = nr64_mac(reg); if (on) val |= bit; else val &= ~bit; nw64_mac(reg, val); } static int niu_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) { struct niu *np = netdev_priv(dev); if (!netif_running(dev)) return -EAGAIN; switch (state) { case ETHTOOL_ID_ACTIVE: np->orig_led_state = niu_led_state_save(np); return 1; /* cycle on/off once per second */ case ETHTOOL_ID_ON: niu_force_led(np, 1); break; case ETHTOOL_ID_OFF: niu_force_led(np, 0); break; case ETHTOOL_ID_INACTIVE: niu_led_state_restore(np, np->orig_led_state); } return 0; } static const struct ethtool_ops niu_ethtool_ops = { .get_drvinfo = niu_get_drvinfo, .get_link = ethtool_op_get_link, .get_msglevel = niu_get_msglevel, .set_msglevel = niu_set_msglevel, .nway_reset = niu_nway_reset, .get_eeprom_len = niu_get_eeprom_len, .get_eeprom = niu_get_eeprom, .get_strings = niu_get_strings, .get_sset_count = niu_get_sset_count, .get_ethtool_stats = niu_get_ethtool_stats, .set_phys_id = niu_set_phys_id, .get_rxnfc = niu_get_nfc, .set_rxnfc = niu_set_nfc, .get_link_ksettings = niu_get_link_ksettings, .set_link_ksettings = niu_set_link_ksettings, }; static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, int ldg, int ldn) { if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) return -EINVAL; if (ldn < 0 || ldn > 
LDN_MAX) return -EINVAL; parent->ldg_map[ldn] = ldg; if (np->parent->plat_type == PLAT_TYPE_NIU) { /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by * the firmware, and we're not supposed to change them. * Validate the mapping, because if it's wrong we probably * won't get any interrupts and that's painful to debug. */ if (nr64(LDG_NUM(ldn)) != ldg) { dev_err(np->device, "Port %u, mismatched LDG assignment for ldn %d, should be %d is %llu\n", np->port, ldn, ldg, (unsigned long long) nr64(LDG_NUM(ldn))); return -EINVAL; } } else nw64(LDG_NUM(ldn), ldg); return 0; } static int niu_set_ldg_timer_res(struct niu *np, int res) { if (res < 0 || res > LDG_TIMER_RES_VAL) return -EINVAL; nw64(LDG_TIMER_RES, res); return 0; } static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector) { if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) || (func < 0 || func > 3) || (vector < 0 || vector > 0x1f)) return -EINVAL; nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector); return 0; } static int niu_pci_eeprom_read(struct niu *np, u32 addr) { u64 frame, frame_base = (ESPC_PIO_STAT_READ_START | (addr << ESPC_PIO_STAT_ADDR_SHIFT)); int limit; if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT)) return -EINVAL; frame = frame_base; nw64(ESPC_PIO_STAT, frame); limit = 64; do { udelay(5); frame = nr64(ESPC_PIO_STAT); if (frame & ESPC_PIO_STAT_READ_END) break; } while (limit--); if (!(frame & ESPC_PIO_STAT_READ_END)) { dev_err(np->device, "EEPROM read timeout frame[%llx]\n", (unsigned long long) frame); return -ENODEV; } frame = frame_base; nw64(ESPC_PIO_STAT, frame); limit = 64; do { udelay(5); frame = nr64(ESPC_PIO_STAT); if (frame & ESPC_PIO_STAT_READ_END) break; } while (limit--); if (!(frame & ESPC_PIO_STAT_READ_END)) { dev_err(np->device, "EEPROM read timeout frame[%llx]\n", (unsigned long long) frame); return -ENODEV; } frame = nr64(ESPC_PIO_STAT); return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT; } static int niu_pci_eeprom_read16(struct niu *np, u32 off) { int err = niu_pci_eeprom_read(np, off); u16 val; if (err < 0) return err; val = (err << 8); err = niu_pci_eeprom_read(np, off + 1); if (err < 0) return err; val |= (err & 0xff); return val; } static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off) { int err = niu_pci_eeprom_read(np, off); u16 val; if (err < 0) return err; val = (err & 0xff); err = niu_pci_eeprom_read(np, off + 1); if (err < 0) return err; val |= (err & 0xff) << 8; return val; } static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf, int namebuf_len) { int i; for (i = 0; i < namebuf_len; i++) { int err = niu_pci_eeprom_read(np, off + i); if (err < 0) return err; *namebuf++ = err; if (!err) break; } if (i >= namebuf_len) return -EINVAL; return i + 1; } static void niu_vpd_parse_version(struct niu *np) { struct niu_vpd *vpd = &np->vpd; int len = strlen(vpd->version) + 1; const char *s = vpd->version; int i; for (i = 0; i < len - 5; i++) { if (!strncmp(s + i, "FCode ", 6)) break; } if (i >= len - 5) return; s += i + 5; sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor); netif_printk(np, probe, KERN_DEBUG, np->dev, "VPD_SCAN: FCODE major(%d) minor(%d)\n", vpd->fcode_major, vpd->fcode_minor); if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || (vpd->fcode_major == NIU_VPD_MIN_MAJOR && vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) np->flags |= NIU_FLAGS_VPD_VALID; } /* ESPC_PIO_EN_ENABLE must be set */ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end) { unsigned int found_mask = 0; #define FOUND_MASK_MODEL 0x00000001 
#define FOUND_MASK_BMODEL 0x00000002 #define FOUND_MASK_VERS 0x00000004 #define FOUND_MASK_MAC 0x00000008 #define FOUND_MASK_NMAC 0x00000010 #define FOUND_MASK_PHY 0x00000020 #define FOUND_MASK_ALL 0x0000003f netif_printk(np, probe, KERN_DEBUG, np->dev, "VPD_SCAN: start[%x] end[%x]\n", start, end); while (start < end) { int len, err, prop_len; char namebuf[64]; u8 *prop_buf; int max_len; if (found_mask == FOUND_MASK_ALL) { niu_vpd_parse_version(np); return 1; } err = niu_pci_eeprom_read(np, start + 2); if (err < 0) return err; len = err; start += 3; prop_len = niu_pci_eeprom_read(np, start + 4); if (prop_len < 0) return prop_len; err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64); if (err < 0) return err; prop_buf = NULL; max_len = 0; if (!strcmp(namebuf, "model")) { prop_buf = np->vpd.model; max_len = NIU_VPD_MODEL_MAX; found_mask |= FOUND_MASK_MODEL; } else if (!strcmp(namebuf, "board-model")) { prop_buf = np->vpd.board_model; max_len = NIU_VPD_BD_MODEL_MAX; found_mask |= FOUND_MASK_BMODEL; } else if (!strcmp(namebuf, "version")) { prop_buf = np->vpd.version; max_len = NIU_VPD_VERSION_MAX; found_mask |= FOUND_MASK_VERS; } else if (!strcmp(namebuf, "local-mac-address")) { prop_buf = np->vpd.local_mac; max_len = ETH_ALEN; found_mask |= FOUND_MASK_MAC; } else if (!strcmp(namebuf, "num-mac-addresses")) { prop_buf = &np->vpd.mac_num; max_len = 1; found_mask |= FOUND_MASK_NMAC; } else if (!strcmp(namebuf, "phy-type")) { prop_buf = np->vpd.phy_type; max_len = NIU_VPD_PHY_TYPE_MAX; found_mask |= FOUND_MASK_PHY; } if (max_len && prop_len > max_len) { dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len); return -EINVAL; } if (prop_buf) { u32 off = start + 5 + err; int i; netif_printk(np, probe, KERN_DEBUG, np->dev, "VPD_SCAN: Reading in property [%s] len[%d]\n", namebuf, prop_len); for (i = 0; i < prop_len; i++) { err = niu_pci_eeprom_read(np, off + i); if (err < 0) return err; *prop_buf++ = err; } } start += len; } return 0; } /* ESPC_PIO_EN_ENABLE must be set */ static int niu_pci_vpd_fetch(struct niu *np, u32 start) { u32 offset; int err; err = niu_pci_eeprom_read16_swp(np, start + 1); if (err < 0) return err; offset = err + 3; while (start + offset < ESPC_EEPROM_SIZE) { u32 here = start + offset; u32 end; err = niu_pci_eeprom_read(np, here); if (err < 0) return err; if (err != 0x90) return -EINVAL; err = niu_pci_eeprom_read16_swp(np, here + 1); if (err < 0) return err; here = start + offset + 3; end = start + offset + err; offset += err; err = niu_pci_vpd_scan_props(np, here, end); if (err < 0) return err; /* ret == 1 is not an error */ if (err == 1) return 0; } return 0; } /* ESPC_PIO_EN_ENABLE must be set */ static u32 niu_pci_vpd_offset(struct niu *np) { u32 start = 0, end = ESPC_EEPROM_SIZE, ret; int err; while (start < end) { ret = start; /* ROM header signature? */ err = niu_pci_eeprom_read16(np, start + 0); if (err != 0x55aa) return 0; /* Apply offset to PCI data structure. */ err = niu_pci_eeprom_read16(np, start + 23); if (err < 0) return 0; start += err; /* Check for "PCIR" signature. */ err = niu_pci_eeprom_read16(np, start + 0); if (err != 0x5043) return 0; err = niu_pci_eeprom_read16(np, start + 2); if (err != 0x4952) return 0; /* Check for OBP image type. 
*/ err = niu_pci_eeprom_read(np, start + 20); if (err < 0) return 0; if (err != 0x01) { err = niu_pci_eeprom_read(np, ret + 2); if (err < 0) return 0; start = ret + (err * 512); continue; } err = niu_pci_eeprom_read16_swp(np, start + 8); if (err < 0) return err; ret += err; err = niu_pci_eeprom_read(np, ret + 0); if (err != 0x82) return 0; return ret; } return 0; } static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop) { if (!strcmp(phy_prop, "mif")) { /* 1G copper, MII */ np->flags &= ~(NIU_FLAGS_FIBER | NIU_FLAGS_10G); np->mac_xcvr = MAC_XCVR_MII; } else if (!strcmp(phy_prop, "xgf")) { /* 10G fiber, XPCS */ np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER); np->mac_xcvr = MAC_XCVR_XPCS; } else if (!strcmp(phy_prop, "pcs")) { /* 1G fiber, PCS */ np->flags &= ~NIU_FLAGS_10G; np->flags |= NIU_FLAGS_FIBER; np->mac_xcvr = MAC_XCVR_PCS; } else if (!strcmp(phy_prop, "xgc")) { /* 10G copper, XPCS */ np->flags |= NIU_FLAGS_10G; np->flags &= ~NIU_FLAGS_FIBER; np->mac_xcvr = MAC_XCVR_XPCS; } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) { /* 10G Serdes or 1G Serdes, default to 10G */ np->flags |= NIU_FLAGS_10G; np->flags &= ~NIU_FLAGS_FIBER; np->flags |= NIU_FLAGS_XCVR_SERDES; np->mac_xcvr = MAC_XCVR_XPCS; } else { return -EINVAL; } return 0; } static int niu_pci_vpd_get_nports(struct niu *np) { int ports = 0; if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) { ports = 4; } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) || (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) || (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) || (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) { ports = 2; } return ports; } static void niu_pci_vpd_validate(struct niu *np) { struct net_device *dev = np->dev; struct niu_vpd *vpd = &np->vpd; u8 addr[ETH_ALEN]; u8 val8; if (!is_valid_ether_addr(&vpd->local_mac[0])) { dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n"); np->flags &= ~NIU_FLAGS_VPD_VALID; return; } if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { np->flags |= NIU_FLAGS_10G; np->flags &= ~NIU_FLAGS_FIBER; np->flags |= NIU_FLAGS_XCVR_SERDES; np->mac_xcvr = MAC_XCVR_PCS; if (np->port > 1) { np->flags |= NIU_FLAGS_FIBER; np->flags &= ~NIU_FLAGS_10G; } if (np->flags & NIU_FLAGS_10G) np->mac_xcvr = MAC_XCVR_XPCS; } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_HOTPLUG_PHY); } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { dev_err(np->device, "Illegal phy string [%s]\n", np->vpd.phy_type); dev_err(np->device, "Falling back to SPROM\n"); np->flags &= ~NIU_FLAGS_VPD_VALID; return; } ether_addr_copy(addr, vpd->local_mac); val8 = addr[5]; addr[5] += np->port; if (addr[5] < val8) addr[4]++; eth_hw_addr_set(dev, addr); } static int niu_pci_probe_sprom(struct niu *np) { struct net_device *dev = np->dev; u8 addr[ETH_ALEN]; int len, i; u64 val, sum; u8 val8; val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ); val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT; len = val / 4; np->eeprom_len = len; netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: Image size %llu\n", (unsigned long long)val); sum = 0; for (i = 0; i < len; i++) { val = nr64(ESPC_NCR(i)); sum += (val >> 0) & 0xff; sum += (val >> 8) & 0xff; sum += (val >> 16) & 0xff; sum += (val >> 24) & 0xff; } netif_printk(np, probe, 
KERN_DEBUG, np->dev, "SPROM: Checksum %x\n", (int)(sum & 0xff)); if ((sum & 0xff) != 0xab) { dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff)); return -EINVAL; } val = nr64(ESPC_PHY_TYPE); switch (np->port) { case 0: val8 = (val & ESPC_PHY_TYPE_PORT0) >> ESPC_PHY_TYPE_PORT0_SHIFT; break; case 1: val8 = (val & ESPC_PHY_TYPE_PORT1) >> ESPC_PHY_TYPE_PORT1_SHIFT; break; case 2: val8 = (val & ESPC_PHY_TYPE_PORT2) >> ESPC_PHY_TYPE_PORT2_SHIFT; break; case 3: val8 = (val & ESPC_PHY_TYPE_PORT3) >> ESPC_PHY_TYPE_PORT3_SHIFT; break; default: dev_err(np->device, "Bogus port number %u\n", np->port); return -EINVAL; } netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: PHY type %x\n", val8); switch (val8) { case ESPC_PHY_TYPE_1G_COPPER: /* 1G copper, MII */ np->flags &= ~(NIU_FLAGS_FIBER | NIU_FLAGS_10G); np->mac_xcvr = MAC_XCVR_MII; break; case ESPC_PHY_TYPE_1G_FIBER: /* 1G fiber, PCS */ np->flags &= ~NIU_FLAGS_10G; np->flags |= NIU_FLAGS_FIBER; np->mac_xcvr = MAC_XCVR_PCS; break; case ESPC_PHY_TYPE_10G_COPPER: /* 10G copper, XPCS */ np->flags |= NIU_FLAGS_10G; np->flags &= ~NIU_FLAGS_FIBER; np->mac_xcvr = MAC_XCVR_XPCS; break; case ESPC_PHY_TYPE_10G_FIBER: /* 10G fiber, XPCS */ np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER); np->mac_xcvr = MAC_XCVR_XPCS; break; default: dev_err(np->device, "Bogus SPROM phy type %u\n", val8); return -EINVAL; } val = nr64(ESPC_MAC_ADDR0); netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val); addr[0] = (val >> 0) & 0xff; addr[1] = (val >> 8) & 0xff; addr[2] = (val >> 16) & 0xff; addr[3] = (val >> 24) & 0xff; val = nr64(ESPC_MAC_ADDR1); netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val); addr[4] = (val >> 0) & 0xff; addr[5] = (val >> 8) & 0xff; if (!is_valid_ether_addr(addr)) { dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n", addr); return -EINVAL; } val8 = addr[5]; addr[5] += np->port; if (addr[5] < val8) addr[4]++; eth_hw_addr_set(dev, addr); val = nr64(ESPC_MOD_STR_LEN); netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val); if (val >= 8 * 4) return -EINVAL; for (i = 0; i < val; i += 4) { u64 tmp = nr64(ESPC_NCR(5 + (i / 4))); np->vpd.model[i + 3] = (tmp >> 0) & 0xff; np->vpd.model[i + 2] = (tmp >> 8) & 0xff; np->vpd.model[i + 1] = (tmp >> 16) & 0xff; np->vpd.model[i + 0] = (tmp >> 24) & 0xff; } np->vpd.model[val] = '\0'; val = nr64(ESPC_BD_MOD_STR_LEN); netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val); if (val >= 4 * 4) return -EINVAL; for (i = 0; i < val; i += 4) { u64 tmp = nr64(ESPC_NCR(14 + (i / 4))); np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff; np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff; np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff; np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff; } np->vpd.board_model[val] = '\0'; np->vpd.mac_num = nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num); return 0; } static int niu_get_and_validate_port(struct niu *np) { struct niu_parent *parent = np->parent; if (np->port <= 1) np->flags |= NIU_FLAGS_XMAC; if (!parent->num_ports) { if (parent->plat_type == PLAT_TYPE_NIU) { parent->num_ports = 2; } else { parent->num_ports = niu_pci_vpd_get_nports(np); if (!parent->num_ports) { /* Fall back to SPROM as last resort. * This will fail on most cards. 
*/ parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; /* All of the current probing methods fail on * Maramba on-board parts. */ if (!parent->num_ports) parent->num_ports = 4; } } } if (np->port >= parent->num_ports) return -ENODEV; return 0; } static int phy_record(struct niu_parent *parent, struct phy_probe_info *p, int dev_id_1, int dev_id_2, u8 phy_port, int type) { u32 id = (dev_id_1 << 16) | dev_id_2; u8 idx; if (dev_id_1 < 0 || dev_id_2 < 0) return 0; if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) { /* Because of the NIU_PHY_ID_MASK being applied, the 8704 * test covers the 8706 as well. */ if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) && ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011)) return 0; } else { if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R) return 0; } pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n", parent->index, id, type == PHY_TYPE_PMA_PMD ? "PMA/PMD" : type == PHY_TYPE_PCS ? "PCS" : "MII", phy_port); if (p->cur[type] >= NIU_MAX_PORTS) { pr_err("Too many PHY ports\n"); return -EINVAL; } idx = p->cur[type]; p->phy_id[type][idx] = id; p->phy_port[type][idx] = phy_port; p->cur[type] = idx + 1; return 0; } static int port_has_10g(struct phy_probe_info *p, int port) { int i; for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) { if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port) return 1; } for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) { if (p->phy_port[PHY_TYPE_PCS][i] == port) return 1; } return 0; } static int count_10g_ports(struct phy_probe_info *p, int *lowest) { int port, cnt; cnt = 0; *lowest = 32; for (port = 8; port < 32; port++) { if (port_has_10g(p, port)) { if (!cnt) *lowest = port; cnt++; } } return cnt; } static int count_1g_ports(struct phy_probe_info *p, int *lowest) { *lowest = 32; if (p->cur[PHY_TYPE_MII]) *lowest = p->phy_port[PHY_TYPE_MII][0]; return p->cur[PHY_TYPE_MII]; } static void niu_n2_divide_channels(struct niu_parent *parent) { int num_ports = parent->num_ports; int i; for (i = 0; i < num_ports; i++) { parent->rxchan_per_port[i] = (16 / num_ports); parent->txchan_per_port[i] = (16 / num_ports); pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n", parent->index, i, parent->rxchan_per_port[i], parent->txchan_per_port[i]); } } static void niu_divide_channels(struct niu_parent *parent, int num_10g, int num_1g) { int num_ports = parent->num_ports; int rx_chans_per_10g, rx_chans_per_1g; int tx_chans_per_10g, tx_chans_per_1g; int i, tot_rx, tot_tx; if (!num_10g || !num_1g) { rx_chans_per_10g = rx_chans_per_1g = (NIU_NUM_RXCHAN / num_ports); tx_chans_per_10g = tx_chans_per_1g = (NIU_NUM_TXCHAN / num_ports); } else { rx_chans_per_1g = NIU_NUM_RXCHAN / 8; rx_chans_per_10g = (NIU_NUM_RXCHAN - (rx_chans_per_1g * num_1g)) / num_10g; tx_chans_per_1g = NIU_NUM_TXCHAN / 6; tx_chans_per_10g = (NIU_NUM_TXCHAN - (tx_chans_per_1g * num_1g)) / num_10g; } tot_rx = tot_tx = 0; for (i = 0; i < num_ports; i++) { int type = phy_decode(parent->port_phy, i); if (type == PORT_TYPE_10G) { parent->rxchan_per_port[i] = rx_chans_per_10g; parent->txchan_per_port[i] = tx_chans_per_10g; } else { parent->rxchan_per_port[i] = rx_chans_per_1g; parent->txchan_per_port[i] = tx_chans_per_1g; } pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n", parent->index, i, parent->rxchan_per_port[i], parent->txchan_per_port[i]); tot_rx += parent->rxchan_per_port[i]; tot_tx += parent->txchan_per_port[i]; } if (tot_rx > NIU_NUM_RXCHAN) { pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n", parent->index, tot_rx); for (i = 0; i < 
num_ports; i++) parent->rxchan_per_port[i] = 1; } if (tot_tx > NIU_NUM_TXCHAN) { pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n", parent->index, tot_tx); for (i = 0; i < num_ports; i++) parent->txchan_per_port[i] = 1; } if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) { pr_warn("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n", parent->index, tot_rx, tot_tx); } } static void niu_divide_rdc_groups(struct niu_parent *parent, int num_10g, int num_1g) { int i, num_ports = parent->num_ports; int rdc_group, rdc_groups_per_port; int rdc_channel_base; rdc_group = 0; rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports; rdc_channel_base = 0; for (i = 0; i < num_ports; i++) { struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i]; int grp, num_channels = parent->rxchan_per_port[i]; int this_channel_offset; tp->first_table_num = rdc_group; tp->num_tables = rdc_groups_per_port; this_channel_offset = 0; for (grp = 0; grp < tp->num_tables; grp++) { struct rdc_table *rt = &tp->tables[grp]; int slot; pr_info("niu%d: Port %d RDC tbl(%d) [ ", parent->index, i, tp->first_table_num + grp); for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) { rt->rxdma_channel[slot] = rdc_channel_base + this_channel_offset; pr_cont("%d ", rt->rxdma_channel[slot]); if (++this_channel_offset == num_channels) this_channel_offset = 0; } pr_cont("]\n"); } parent->rdc_default[i] = rdc_channel_base; rdc_channel_base += num_channels; rdc_group += rdc_groups_per_port; } } static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent, struct phy_probe_info *info) { unsigned long flags; int port, err; memset(info, 0, sizeof(*info)); /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. */ niu_lock_parent(np, flags); err = 0; for (port = 8; port < 32; port++) { int dev_id_1, dev_id_2; dev_id_1 = mdio_read(np, port, NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1); dev_id_2 = mdio_read(np, port, NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2); err = phy_record(parent, info, dev_id_1, dev_id_2, port, PHY_TYPE_PMA_PMD); if (err) break; dev_id_1 = mdio_read(np, port, NIU_PCS_DEV_ADDR, MII_PHYSID1); dev_id_2 = mdio_read(np, port, NIU_PCS_DEV_ADDR, MII_PHYSID2); err = phy_record(parent, info, dev_id_1, dev_id_2, port, PHY_TYPE_PCS); if (err) break; dev_id_1 = mii_read(np, port, MII_PHYSID1); dev_id_2 = mii_read(np, port, MII_PHYSID2); err = phy_record(parent, info, dev_id_1, dev_id_2, port, PHY_TYPE_MII); if (err) break; } niu_unlock_parent(np, flags); return err; } static int walk_phys(struct niu *np, struct niu_parent *parent) { struct phy_probe_info *info = &parent->phy_probe_info; int lowest_10g, lowest_1g; int num_10g, num_1g; u32 val; int err; num_10g = num_1g = 0; if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { num_10g = 0; num_1g = 2; parent->plat_type = PLAT_TYPE_ATCA_CP3220; parent->num_ports = 4; val = (phy_encode(PORT_TYPE_1G, 0) | phy_encode(PORT_TYPE_1G, 1) | phy_encode(PORT_TYPE_1G, 2) | phy_encode(PORT_TYPE_1G, 3)); } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { num_10g = 2; num_1g = 0; parent->num_ports = 2; val = (phy_encode(PORT_TYPE_10G, 0) | phy_encode(PORT_TYPE_10G, 1)); } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) && (parent->plat_type == PLAT_TYPE_NIU)) { /* this is the Monza case */ if (np->flags & NIU_FLAGS_10G) { val = (phy_encode(PORT_TYPE_10G, 0) | phy_encode(PORT_TYPE_10G, 1)); } else { val = (phy_encode(PORT_TYPE_1G, 0) | phy_encode(PORT_TYPE_1G, 1)); } } else { err = fill_phy_probe_info(np, parent, info); if (err) return err; 
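/* The switch below keys on a byte that packs the probed 10G port
 * count into the high nibble and the 1G port count into the low
 * nibble, e.g. two 10G PHYs plus four 1G PHYs give
 * (2 << 4) | 4 = 0x24, while a lone 10G PHY gives 0x10.
 */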
num_10g = count_10g_ports(info, &lowest_10g); num_1g = count_1g_ports(info, &lowest_1g); switch ((num_10g << 4) | num_1g) { case 0x24: if (lowest_1g == 10) parent->plat_type = PLAT_TYPE_VF_P0; else if (lowest_1g == 26) parent->plat_type = PLAT_TYPE_VF_P1; else goto unknown_vg_1g_port; fallthrough; case 0x22: val = (phy_encode(PORT_TYPE_10G, 0) | phy_encode(PORT_TYPE_10G, 1) | phy_encode(PORT_TYPE_1G, 2) | phy_encode(PORT_TYPE_1G, 3)); break; case 0x20: val = (phy_encode(PORT_TYPE_10G, 0) | phy_encode(PORT_TYPE_10G, 1)); break; case 0x10: val = phy_encode(PORT_TYPE_10G, np->port); break; case 0x14: if (lowest_1g == 10) parent->plat_type = PLAT_TYPE_VF_P0; else if (lowest_1g == 26) parent->plat_type = PLAT_TYPE_VF_P1; else goto unknown_vg_1g_port; fallthrough; case 0x13: if ((lowest_10g & 0x7) == 0) val = (phy_encode(PORT_TYPE_10G, 0) | phy_encode(PORT_TYPE_1G, 1) | phy_encode(PORT_TYPE_1G, 2) | phy_encode(PORT_TYPE_1G, 3)); else val = (phy_encode(PORT_TYPE_1G, 0) | phy_encode(PORT_TYPE_10G, 1) | phy_encode(PORT_TYPE_1G, 2) | phy_encode(PORT_TYPE_1G, 3)); break; case 0x04: if (lowest_1g == 10) parent->plat_type = PLAT_TYPE_VF_P0; else if (lowest_1g == 26) parent->plat_type = PLAT_TYPE_VF_P1; else goto unknown_vg_1g_port; val = (phy_encode(PORT_TYPE_1G, 0) | phy_encode(PORT_TYPE_1G, 1) | phy_encode(PORT_TYPE_1G, 2) | phy_encode(PORT_TYPE_1G, 3)); break; default: pr_err("Unsupported port config 10G[%d] 1G[%d]\n", num_10g, num_1g); return -EINVAL; } } parent->port_phy = val; if (parent->plat_type == PLAT_TYPE_NIU) niu_n2_divide_channels(parent); else niu_divide_channels(parent, num_10g, num_1g); niu_divide_rdc_groups(parent, num_10g, num_1g); return 0; unknown_vg_1g_port: pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g); return -EINVAL; } static int niu_probe_ports(struct niu *np) { struct niu_parent *parent = np->parent; int err, i; if (parent->port_phy == PORT_PHY_UNKNOWN) { err = walk_phys(np, parent); if (err) return err; niu_set_ldg_timer_res(np, 2); for (i = 0; i <= LDN_MAX; i++) niu_ldn_irq_enable(np, i, 0); } if (parent->port_phy == PORT_PHY_INVALID) return -EINVAL; return 0; } static int niu_classifier_swstate_init(struct niu *np) { struct niu_classifier *cp = &np->clas; cp->tcam_top = (u16) np->port; cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports; cp->h1_init = 0xffffffff; cp->h2_init = 0xffff; return fflp_early_init(np); } static void niu_link_config_init(struct niu *np) { struct niu_link_config *lp = &np->link_config; lp->advertising = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg); lp->speed = lp->active_speed = SPEED_INVALID; lp->duplex = DUPLEX_FULL; lp->active_duplex = DUPLEX_INVALID; lp->autoneg = 1; #if 0 lp->loopback_mode = LOOPBACK_MAC; lp->active_speed = SPEED_10000; lp->active_duplex = DUPLEX_FULL; #else lp->loopback_mode = LOOPBACK_DISABLED; #endif } static int niu_init_mac_ipp_pcs_base(struct niu *np) { switch (np->port) { case 0: np->mac_regs = np->regs + XMAC_PORT0_OFF; np->ipp_off = 0x00000; np->pcs_off = 0x04000; np->xpcs_off = 0x02000; break; case 1: np->mac_regs = np->regs + XMAC_PORT1_OFF; np->ipp_off = 0x08000; np->pcs_off = 0x0a000; np->xpcs_off = 0x08000; break; case 2: np->mac_regs = np->regs + BMAC_PORT2_OFF; np->ipp_off = 0x04000; np->pcs_off = 0x0e000; np->xpcs_off = ~0UL; break; case 3: np->mac_regs = np->regs + BMAC_PORT3_OFF; np->ipp_off = 0x0c000; 
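/* Like port 2, port 3 uses the BMAC register block; the BMAC
 * ports have no XPCS registers, which is presumably why xpcs_off
 * is set to the ~0UL placeholder below.
 */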
np->pcs_off = 0x12000; np->xpcs_off = ~0UL; break; default: dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port); return -EINVAL; } return 0; } static void niu_try_msix(struct niu *np, u8 *ldg_num_map) { struct msix_entry msi_vec[NIU_NUM_LDG]; struct niu_parent *parent = np->parent; struct pci_dev *pdev = np->pdev; int i, num_irqs; u8 first_ldg; first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++) ldg_num_map[i] = first_ldg + i; num_irqs = (parent->rxchan_per_port[np->port] + parent->txchan_per_port[np->port] + (np->port == 0 ? 3 : 1)); BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); for (i = 0; i < num_irqs; i++) { msi_vec[i].vector = 0; msi_vec[i].entry = i; } num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs); if (num_irqs < 0) { np->flags &= ~NIU_FLAGS_MSIX; return; } np->flags |= NIU_FLAGS_MSIX; for (i = 0; i < num_irqs; i++) np->ldg[i].irq = msi_vec[i].vector; np->num_ldg = num_irqs; } static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) { #ifdef CONFIG_SPARC64 struct platform_device *op = np->op; const u32 *int_prop; int i; int_prop = of_get_property(op->dev.of_node, "interrupts", NULL); if (!int_prop) return -ENODEV; for (i = 0; i < op->archdata.num_irqs; i++) { ldg_num_map[i] = int_prop[i]; np->ldg[i].irq = op->archdata.irqs[i]; } np->num_ldg = op->archdata.num_irqs; return 0; #else return -EINVAL; #endif } static int niu_ldg_init(struct niu *np) { struct niu_parent *parent = np->parent; u8 ldg_num_map[NIU_NUM_LDG]; int first_chan, num_chan; int i, err, ldg_rotor; u8 port; np->num_ldg = 1; np->ldg[0].irq = np->dev->irq; if (parent->plat_type == PLAT_TYPE_NIU) { err = niu_n2_irq_init(np, ldg_num_map); if (err) return err; } else niu_try_msix(np, ldg_num_map); port = np->port; for (i = 0; i < np->num_ldg; i++) { struct niu_ldg *lp = &np->ldg[i]; netif_napi_add(np->dev, &lp->napi, niu_poll); lp->np = np; lp->ldg_num = ldg_num_map[i]; lp->timer = 2; /* XXX */ /* On N2 NIU the firmware has setup the SID mappings so they go * to the correct values that will route the LDG to the proper * interrupt in the NCU interrupt table. */ if (np->parent->plat_type != PLAT_TYPE_NIU) { err = niu_set_ldg_sid(np, lp->ldg_num, port, i); if (err) return err; } } /* We adopt the LDG assignment ordering used by the N2 NIU * 'interrupt' properties because that simplifies a lot of * things. 
This ordering is: * * MAC * MIF (if port zero) * SYSERR (if port zero) * RX channels * TX channels */ ldg_rotor = 0; err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], LDN_MAC(port)); if (err) return err; ldg_rotor++; if (ldg_rotor == np->num_ldg) ldg_rotor = 0; if (port == 0) { err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], LDN_MIF); if (err) return err; ldg_rotor++; if (ldg_rotor == np->num_ldg) ldg_rotor = 0; err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], LDN_DEVICE_ERROR); if (err) return err; ldg_rotor++; if (ldg_rotor == np->num_ldg) ldg_rotor = 0; } first_chan = 0; for (i = 0; i < port; i++) first_chan += parent->rxchan_per_port[i]; num_chan = parent->rxchan_per_port[port]; for (i = first_chan; i < (first_chan + num_chan); i++) { err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], LDN_RXDMA(i)); if (err) return err; ldg_rotor++; if (ldg_rotor == np->num_ldg) ldg_rotor = 0; } first_chan = 0; for (i = 0; i < port; i++) first_chan += parent->txchan_per_port[i]; num_chan = parent->txchan_per_port[port]; for (i = first_chan; i < (first_chan + num_chan); i++) { err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], LDN_TXDMA(i)); if (err) return err; ldg_rotor++; if (ldg_rotor == np->num_ldg) ldg_rotor = 0; } return 0; } static void niu_ldg_free(struct niu *np) { if (np->flags & NIU_FLAGS_MSIX) pci_disable_msix(np->pdev); } static int niu_get_of_props(struct niu *np) { #ifdef CONFIG_SPARC64 struct net_device *dev = np->dev; struct device_node *dp; const char *phy_type; const u8 *mac_addr; const char *model; int prop_len; if (np->parent->plat_type == PLAT_TYPE_NIU) dp = np->op->dev.of_node; else dp = pci_device_to_OF_node(np->pdev); phy_type = of_get_property(dp, "phy-type", NULL); if (!phy_type) { netdev_err(dev, "%pOF: OF node lacks phy-type property\n", dp); return -EINVAL; } if (!strcmp(phy_type, "none")) return -ENODEV; strcpy(np->vpd.phy_type, phy_type); if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { netdev_err(dev, "%pOF: Illegal phy string [%s]\n", dp, np->vpd.phy_type); return -EINVAL; } mac_addr = of_get_property(dp, "local-mac-address", &prop_len); if (!mac_addr) { netdev_err(dev, "%pOF: OF node lacks local-mac-address property\n", dp); return -EINVAL; } if (prop_len != dev->addr_len) { netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n", dp, prop_len); } eth_hw_addr_set(dev, mac_addr); if (!is_valid_ether_addr(&dev->dev_addr[0])) { netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp); netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr); return -EINVAL; } model = of_get_property(dp, "model", NULL); if (model) strcpy(np->vpd.model, model); if (of_property_read_bool(dp, "hot-swappable-phy")) { np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_HOTPLUG_PHY); } return 0; #else return -EINVAL; #endif } static int niu_get_invariants(struct niu *np) { int err, have_props; u32 offset; err = niu_get_of_props(np); if (err == -ENODEV) return err; have_props = !err; err = niu_init_mac_ipp_pcs_base(np); if (err) return err; if (have_props) { err = niu_get_and_validate_port(np); if (err) return err; } else { if (np->parent->plat_type == PLAT_TYPE_NIU) return -EINVAL; nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE); offset = niu_pci_vpd_offset(np); netif_printk(np, probe, KERN_DEBUG, np->dev, "%s() VPD offset [%08x]\n", __func__, offset); if (offset) { err = niu_pci_vpd_fetch(np, offset); if (err < 0) return err; } nw64(ESPC_PIO_EN, 0); if (np->flags & NIU_FLAGS_VPD_VALID) { niu_pci_vpd_validate(np); err = 
niu_get_and_validate_port(np); if (err) return err; } if (!(np->flags & NIU_FLAGS_VPD_VALID)) { err = niu_get_and_validate_port(np); if (err) return err; err = niu_pci_probe_sprom(np); if (err) return err; } } err = niu_probe_ports(np); if (err) return err; niu_ldg_init(np); niu_classifier_swstate_init(np); niu_link_config_init(np); err = niu_determine_phy_disposition(np); if (!err) err = niu_init_link(np); return err; } static LIST_HEAD(niu_parent_list); static DEFINE_MUTEX(niu_parent_lock); static int niu_parent_index; static ssize_t show_port_phy(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *plat_dev = to_platform_device(dev); struct niu_parent *p = dev_get_platdata(&plat_dev->dev); u32 port_phy = p->port_phy; char *orig_buf = buf; int i; if (port_phy == PORT_PHY_UNKNOWN || port_phy == PORT_PHY_INVALID) return 0; for (i = 0; i < p->num_ports; i++) { const char *type_str; int type; type = phy_decode(port_phy, i); if (type == PORT_TYPE_10G) type_str = "10G"; else type_str = "1G"; buf += sprintf(buf, (i == 0) ? "%s" : " %s", type_str); } buf += sprintf(buf, "\n"); return buf - orig_buf; } static ssize_t show_plat_type(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *plat_dev = to_platform_device(dev); struct niu_parent *p = dev_get_platdata(&plat_dev->dev); const char *type_str; switch (p->plat_type) { case PLAT_TYPE_ATLAS: type_str = "atlas"; break; case PLAT_TYPE_NIU: type_str = "niu"; break; case PLAT_TYPE_VF_P0: type_str = "vf_p0"; break; case PLAT_TYPE_VF_P1: type_str = "vf_p1"; break; default: type_str = "unknown"; break; } return sprintf(buf, "%s\n", type_str); } static ssize_t __show_chan_per_port(struct device *dev, struct device_attribute *attr, char *buf, int rx) { struct platform_device *plat_dev = to_platform_device(dev); struct niu_parent *p = dev_get_platdata(&plat_dev->dev); char *orig_buf = buf; u8 *arr; int i; arr = (rx ? p->rxchan_per_port : p->txchan_per_port); for (i = 0; i < p->num_ports; i++) { buf += sprintf(buf, (i == 0) ? 
"%d" : " %d", arr[i]); } buf += sprintf(buf, "\n"); return buf - orig_buf; } static ssize_t show_rxchan_per_port(struct device *dev, struct device_attribute *attr, char *buf) { return __show_chan_per_port(dev, attr, buf, 1); } static ssize_t show_txchan_per_port(struct device *dev, struct device_attribute *attr, char *buf) { return __show_chan_per_port(dev, attr, buf, 1); } static ssize_t show_num_ports(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *plat_dev = to_platform_device(dev); struct niu_parent *p = dev_get_platdata(&plat_dev->dev); return sprintf(buf, "%d\n", p->num_ports); } static struct device_attribute niu_parent_attributes[] = { __ATTR(port_phy, 0444, show_port_phy, NULL), __ATTR(plat_type, 0444, show_plat_type, NULL), __ATTR(rxchan_per_port, 0444, show_rxchan_per_port, NULL), __ATTR(txchan_per_port, 0444, show_txchan_per_port, NULL), __ATTR(num_ports, 0444, show_num_ports, NULL), {} }; static struct niu_parent *niu_new_parent(struct niu *np, union niu_parent_id *id, u8 ptype) { struct platform_device *plat_dev; struct niu_parent *p; int i; plat_dev = platform_device_register_simple("niu-board", niu_parent_index, NULL, 0); if (IS_ERR(plat_dev)) return NULL; for (i = 0; niu_parent_attributes[i].attr.name; i++) { int err = device_create_file(&plat_dev->dev, &niu_parent_attributes[i]); if (err) goto fail_unregister; } p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) goto fail_unregister; p->index = niu_parent_index++; plat_dev->dev.platform_data = p; p->plat_dev = plat_dev; memcpy(&p->id, id, sizeof(*id)); p->plat_type = ptype; INIT_LIST_HEAD(&p->list); atomic_set(&p->refcnt, 0); list_add(&p->list, &niu_parent_list); spin_lock_init(&p->lock); p->rxdma_clock_divider = 7500; p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES; if (p->plat_type == PLAT_TYPE_NIU) p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES; for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { int index = i - CLASS_CODE_USER_PROG1; p->tcam_key[index] = TCAM_KEY_TSEL; p->flow_key[index] = (FLOW_KEY_IPSA | FLOW_KEY_IPDA | FLOW_KEY_PROTO | (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT) | (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT)); } for (i = 0; i < LDN_MAX + 1; i++) p->ldg_map[i] = LDG_INVALID; return p; fail_unregister: platform_device_unregister(plat_dev); return NULL; } static struct niu_parent *niu_get_parent(struct niu *np, union niu_parent_id *id, u8 ptype) { struct niu_parent *p, *tmp; int port = np->port; mutex_lock(&niu_parent_lock); p = NULL; list_for_each_entry(tmp, &niu_parent_list, list) { if (!memcmp(id, &tmp->id, sizeof(*id))) { p = tmp; break; } } if (!p) p = niu_new_parent(np, id, ptype); if (p) { char port_name[8]; int err; sprintf(port_name, "port%d", port); err = sysfs_create_link(&p->plat_dev->dev.kobj, &np->device->kobj, port_name); if (!err) { p->ports[port] = np; atomic_inc(&p->refcnt); } } mutex_unlock(&niu_parent_lock); return p; } static void niu_put_parent(struct niu *np) { struct niu_parent *p = np->parent; u8 port = np->port; char port_name[8]; BUG_ON(!p || p->ports[port] != np); netif_printk(np, probe, KERN_DEBUG, np->dev, "%s() port[%u]\n", __func__, port); sprintf(port_name, "port%d", port); mutex_lock(&niu_parent_lock); sysfs_remove_link(&p->plat_dev->dev.kobj, port_name); p->ports[port] = NULL; np->parent = NULL; if (atomic_dec_and_test(&p->refcnt)) { list_del(&p->list); platform_device_unregister(p->plat_dev); } mutex_unlock(&niu_parent_lock); } static void *niu_pci_alloc_coherent(struct device *dev, size_t size, u64 *handle, gfp_t flag) { 
dma_addr_t dh; void *ret; ret = dma_alloc_coherent(dev, size, &dh, flag); if (ret) *handle = dh; return ret; } static void niu_pci_free_coherent(struct device *dev, size_t size, void *cpu_addr, u64 handle) { dma_free_coherent(dev, size, cpu_addr, handle); } static u64 niu_pci_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction) { return dma_map_page(dev, page, offset, size, direction); } static void niu_pci_unmap_page(struct device *dev, u64 dma_address, size_t size, enum dma_data_direction direction) { dma_unmap_page(dev, dma_address, size, direction); } static u64 niu_pci_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) { return dma_map_single(dev, cpu_addr, size, direction); } static void niu_pci_unmap_single(struct device *dev, u64 dma_address, size_t size, enum dma_data_direction direction) { dma_unmap_single(dev, dma_address, size, direction); } static const struct niu_ops niu_pci_ops = { .alloc_coherent = niu_pci_alloc_coherent, .free_coherent = niu_pci_free_coherent, .map_page = niu_pci_map_page, .unmap_page = niu_pci_unmap_page, .map_single = niu_pci_map_single, .unmap_single = niu_pci_unmap_single, }; static void niu_driver_version(void) { static int niu_version_printed; if (niu_version_printed++ == 0) pr_info("%s", version); } static struct net_device *niu_alloc_and_init(struct device *gen_dev, struct pci_dev *pdev, struct platform_device *op, const struct niu_ops *ops, u8 port) { struct net_device *dev; struct niu *np; dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN); if (!dev) return NULL; SET_NETDEV_DEV(dev, gen_dev); np = netdev_priv(dev); np->dev = dev; np->pdev = pdev; np->op = op; np->device = gen_dev; np->ops = ops; np->msg_enable = niu_debug; spin_lock_init(&np->lock); INIT_WORK(&np->reset_task, niu_reset_task); np->port = port; return dev; } static const struct net_device_ops niu_netdev_ops = { .ndo_open = niu_open, .ndo_stop = niu_close, .ndo_start_xmit = niu_start_xmit, .ndo_get_stats64 = niu_get_stats, .ndo_set_rx_mode = niu_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = niu_set_mac_addr, .ndo_eth_ioctl = niu_ioctl, .ndo_tx_timeout = niu_tx_timeout, .ndo_change_mtu = niu_change_mtu, }; static void niu_assign_netdev_ops(struct net_device *dev) { dev->netdev_ops = &niu_netdev_ops; dev->ethtool_ops = &niu_ethtool_ops; dev->watchdog_timeo = NIU_TX_TIMEOUT; } static void niu_device_announce(struct niu *np) { struct net_device *dev = np->dev; pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr); if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) { pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", dev->name, (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"), (np->mac_xcvr == MAC_XCVR_MII ? "MII" : (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), np->vpd.phy_type); } else { pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", dev->name, (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), (np->flags & NIU_FLAGS_FIBER ? "FIBER" : (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" : "COPPER")), (np->mac_xcvr == MAC_XCVR_MII ? "MII" : (np->mac_xcvr == MAC_XCVR_PCS ? 
"PCS" : "XPCS")), np->vpd.phy_type); } } static void niu_set_basic_features(struct net_device *dev) { dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH; dev->features |= dev->hw_features | NETIF_F_RXCSUM; } static int niu_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { union niu_parent_id parent_id; struct net_device *dev; struct niu *np; int err; niu_driver_version(); err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); return err; } if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n"); err = -ENODEV; goto err_out_disable_pdev; } err = pci_request_regions(pdev, DRV_MODULE_NAME); if (err) { dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); goto err_out_disable_pdev; } if (!pci_is_pcie(pdev)) { dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n"); err = -ENODEV; goto err_out_free_res; } dev = niu_alloc_and_init(&pdev->dev, pdev, NULL, &niu_pci_ops, PCI_FUNC(pdev->devfn)); if (!dev) { err = -ENOMEM; goto err_out_free_res; } np = netdev_priv(dev); memset(&parent_id, 0, sizeof(parent_id)); parent_id.pci.domain = pci_domain_nr(pdev->bus); parent_id.pci.bus = pdev->bus->number; parent_id.pci.device = PCI_SLOT(pdev->devfn); np->parent = niu_get_parent(np, &parent_id, PLAT_TYPE_ATLAS); if (!np->parent) { err = -ENOMEM; goto err_out_free_dev; } pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_NOSNOOP_EN, PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_RELAX_EN); err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); if (!err) dev->features |= NETIF_F_HIGHDMA; if (err) { err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_out_release_parent; } } niu_set_basic_features(dev); dev->priv_flags |= IFF_UNICAST_FLT; np->regs = pci_ioremap_bar(pdev, 0); if (!np->regs) { dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); err = -ENOMEM; goto err_out_release_parent; } pci_set_master(pdev); pci_save_state(pdev); dev->irq = pdev->irq; /* MTU range: 68 - 9216 */ dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = NIU_MAX_MTU; niu_assign_netdev_ops(dev); err = niu_get_invariants(np); if (err) { if (err != -ENODEV) dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n"); goto err_out_iounmap; } err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Cannot register net device, aborting\n"); goto err_out_iounmap; } pci_set_drvdata(pdev, dev); niu_device_announce(np); return 0; err_out_iounmap: if (np->regs) { iounmap(np->regs); np->regs = NULL; } err_out_release_parent: niu_put_parent(np); err_out_free_dev: free_netdev(dev); err_out_free_res: pci_release_regions(pdev); err_out_disable_pdev: pci_disable_device(pdev); return err; } static void niu_pci_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); if (dev) { struct niu *np = netdev_priv(dev); unregister_netdev(dev); if (np->regs) { iounmap(np->regs); np->regs = NULL; } niu_ldg_free(np); niu_put_parent(np); free_netdev(dev); pci_release_regions(pdev); pci_disable_device(pdev); } } static int __maybe_unused niu_suspend(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); struct niu *np = netdev_priv(dev); unsigned long flags; if (!netif_running(dev)) return 0; 
flush_work(&np->reset_task); niu_netif_stop(np); del_timer_sync(&np->timer); spin_lock_irqsave(&np->lock, flags); niu_enable_interrupts(np, 0); spin_unlock_irqrestore(&np->lock, flags); netif_device_detach(dev); spin_lock_irqsave(&np->lock, flags); niu_stop_hw(np); spin_unlock_irqrestore(&np->lock, flags); return 0; } static int __maybe_unused niu_resume(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); struct niu *np = netdev_priv(dev); unsigned long flags; int err; if (!netif_running(dev)) return 0; netif_device_attach(dev); spin_lock_irqsave(&np->lock, flags); err = niu_init_hw(np); if (!err) { np->timer.expires = jiffies + HZ; add_timer(&np->timer); niu_netif_start(np); } spin_unlock_irqrestore(&np->lock, flags); return err; } static SIMPLE_DEV_PM_OPS(niu_pm_ops, niu_suspend, niu_resume); static struct pci_driver niu_pci_driver = { .name = DRV_MODULE_NAME, .id_table = niu_pci_tbl, .probe = niu_pci_init_one, .remove = niu_pci_remove_one, .driver.pm = &niu_pm_ops, }; #ifdef CONFIG_SPARC64 static void *niu_phys_alloc_coherent(struct device *dev, size_t size, u64 *dma_addr, gfp_t flag) { unsigned long order = get_order(size); unsigned long page = __get_free_pages(flag, order); if (page == 0UL) return NULL; memset((char *)page, 0, PAGE_SIZE << order); *dma_addr = __pa(page); return (void *) page; } static void niu_phys_free_coherent(struct device *dev, size_t size, void *cpu_addr, u64 handle) { unsigned long order = get_order(size); free_pages((unsigned long) cpu_addr, order); } static u64 niu_phys_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction) { return page_to_phys(page) + offset; } static void niu_phys_unmap_page(struct device *dev, u64 dma_address, size_t size, enum dma_data_direction direction) { /* Nothing to do. */ } static u64 niu_phys_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) { return __pa(cpu_addr); } static void niu_phys_unmap_single(struct device *dev, u64 dma_address, size_t size, enum dma_data_direction direction) { /* Nothing to do. 
*/ } static const struct niu_ops niu_phys_ops = { .alloc_coherent = niu_phys_alloc_coherent, .free_coherent = niu_phys_free_coherent, .map_page = niu_phys_map_page, .unmap_page = niu_phys_unmap_page, .map_single = niu_phys_map_single, .unmap_single = niu_phys_unmap_single, }; static int niu_of_probe(struct platform_device *op) { union niu_parent_id parent_id; struct net_device *dev; struct niu *np; const u32 *reg; int err; niu_driver_version(); reg = of_get_property(op->dev.of_node, "reg", NULL); if (!reg) { dev_err(&op->dev, "%pOF: No 'reg' property, aborting\n", op->dev.of_node); return -ENODEV; } dev = niu_alloc_and_init(&op->dev, NULL, op, &niu_phys_ops, reg[0] & 0x1); if (!dev) { err = -ENOMEM; goto err_out; } np = netdev_priv(dev); memset(&parent_id, 0, sizeof(parent_id)); parent_id.of = of_get_parent(op->dev.of_node); np->parent = niu_get_parent(np, &parent_id, PLAT_TYPE_NIU); if (!np->parent) { err = -ENOMEM; goto err_out_free_dev; } niu_set_basic_features(dev); np->regs = of_ioremap(&op->resource[1], 0, resource_size(&op->resource[1]), "niu regs"); if (!np->regs) { dev_err(&op->dev, "Cannot map device registers, aborting\n"); err = -ENOMEM; goto err_out_release_parent; } np->vir_regs_1 = of_ioremap(&op->resource[2], 0, resource_size(&op->resource[2]), "niu vregs-1"); if (!np->vir_regs_1) { dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n"); err = -ENOMEM; goto err_out_iounmap; } np->vir_regs_2 = of_ioremap(&op->resource[3], 0, resource_size(&op->resource[3]), "niu vregs-2"); if (!np->vir_regs_2) { dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n"); err = -ENOMEM; goto err_out_iounmap; } niu_assign_netdev_ops(dev); err = niu_get_invariants(np); if (err) { if (err != -ENODEV) dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n"); goto err_out_iounmap; } err = register_netdev(dev); if (err) { dev_err(&op->dev, "Cannot register net device, aborting\n"); goto err_out_iounmap; } platform_set_drvdata(op, dev); niu_device_announce(np); return 0; err_out_iounmap: if (np->vir_regs_1) { of_iounmap(&op->resource[2], np->vir_regs_1, resource_size(&op->resource[2])); np->vir_regs_1 = NULL; } if (np->vir_regs_2) { of_iounmap(&op->resource[3], np->vir_regs_2, resource_size(&op->resource[3])); np->vir_regs_2 = NULL; } if (np->regs) { of_iounmap(&op->resource[1], np->regs, resource_size(&op->resource[1])); np->regs = NULL; } err_out_release_parent: niu_put_parent(np); err_out_free_dev: free_netdev(dev); err_out: return err; } static int niu_of_remove(struct platform_device *op) { struct net_device *dev = platform_get_drvdata(op); if (dev) { struct niu *np = netdev_priv(dev); unregister_netdev(dev); if (np->vir_regs_1) { of_iounmap(&op->resource[2], np->vir_regs_1, resource_size(&op->resource[2])); np->vir_regs_1 = NULL; } if (np->vir_regs_2) { of_iounmap(&op->resource[3], np->vir_regs_2, resource_size(&op->resource[3])); np->vir_regs_2 = NULL; } if (np->regs) { of_iounmap(&op->resource[1], np->regs, resource_size(&op->resource[1])); np->regs = NULL; } niu_ldg_free(np); niu_put_parent(np); free_netdev(dev); } return 0; } static const struct of_device_id niu_match[] = { { .name = "network", .compatible = "SUNW,niusl", }, {}, }; MODULE_DEVICE_TABLE(of, niu_match); static struct platform_driver niu_of_driver = { .driver = { .name = "niu", .of_match_table = niu_match, }, .probe = niu_of_probe, .remove = niu_of_remove, }; #endif /* CONFIG_SPARC64 */ static int __init niu_init(void) { int err = 0; BUILD_BUG_ON(PAGE_SIZE < 4 * 1024); 
BUILD_BUG_ON(offsetof(struct page, mapping) != offsetof(union niu_page, next)); niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT); #ifdef CONFIG_SPARC64 err = platform_driver_register(&niu_of_driver); #endif if (!err) { err = pci_register_driver(&niu_pci_driver); #ifdef CONFIG_SPARC64 if (err) platform_driver_unregister(&niu_of_driver); #endif } return err; } static void __exit niu_exit(void) { pci_unregister_driver(&niu_pci_driver); #ifdef CONFIG_SPARC64 platform_driver_unregister(&niu_of_driver); #endif } module_init(niu_init); module_exit(niu_exit);
linux-master
drivers/net/ethernet/sun/niu.c
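niu_ldg_init() above hands logical devices to LDGs with a simple rotor: devices are walked in the fixed order described in its comment (MAC, then MIF and SYSERR on port 0, then the port's RX channels, then its TX channels) and each one is assigned to the next entry of ldg_num_map[], wrapping around after num_ldg. The minimal userspace sketch below only illustrates that rotor; the LDG numbers and channel counts in it are made up for illustration and are not taken from the driver or from real hardware.

/* Userspace sketch of the LDG rotor in niu_ldg_init(); the LDG numbers
 * and channel counts are illustrative only.
 */
#include <stdio.h>

int main(void)
{
	int ldg_num_map[] = { 8, 9, 10, 11 };	/* hypothetical LDG numbers */
	int num_ldg = 4, rotor = 0;
	/* Fixed assignment order; MIF and SYSERR only exist on port 0. */
	const char *ldn[] = { "MAC", "MIF", "SYSERR",
			      "RX0", "RX1", "RX2", "RX3",
			      "TX0", "TX1", "TX2", "TX3" };
	unsigned int i;

	for (i = 0; i < sizeof(ldn) / sizeof(ldn[0]); i++) {
		printf("%-6s -> LDG %d\n", ldn[i], ldg_num_map[rotor]);
		if (++rotor == num_ldg)
			rotor = 0;
	}
	return 0;
}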
// SPDX-License-Identifier: GPL-2.0 /* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters. * * Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller ([email protected]) */ #include <linux/module.h> #include <linux/pgtable.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/crc32.h> #include <linux/errno.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/gfp.h> #include <asm/auxio.h> #include <asm/byteorder.h> #include <asm/dma.h> #include <asm/idprom.h> #include <asm/io.h> #include <asm/openprom.h> #include <asm/oplib.h> #include "sunbmac.h" #define DRV_NAME "sunbmac" #define DRV_VERSION "2.1" #define DRV_RELDATE "August 26, 2008" #define DRV_AUTHOR "David S. Miller ([email protected])" static char version[] = DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION("Sun BigMAC 100baseT ethernet driver"); MODULE_LICENSE("GPL"); #undef DEBUG_PROBE #undef DEBUG_TX #undef DEBUG_IRQ #ifdef DEBUG_PROBE #define DP(x) printk x #else #define DP(x) #endif #ifdef DEBUG_TX #define DTX(x) printk x #else #define DTX(x) #endif #ifdef DEBUG_IRQ #define DIRQ(x) printk x #else #define DIRQ(x) #endif #define DEFAULT_JAMSIZE 4 /* Toe jam */ #define QEC_RESET_TRIES 200 static int qec_global_reset(void __iomem *gregs) { int tries = QEC_RESET_TRIES; sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL); while (--tries) { if (sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET) { udelay(20); continue; } break; } if (tries) return 0; printk(KERN_ERR "BigMAC: Cannot reset the QEC.\n"); return -1; } static void qec_init(struct bigmac *bp) { struct platform_device *qec_op = bp->qec_op; void __iomem *gregs = bp->gregs; u8 bsizes = bp->bigmac_bursts; u32 regval; /* 64byte bursts do not work at the moment, do * not even try to enable them. -DaveM */ if (bsizes & DMA_BURST32) regval = GLOB_CTRL_B32; else regval = GLOB_CTRL_B16; sbus_writel(regval | GLOB_CTRL_BMODE, gregs + GLOB_CTRL); sbus_writel(GLOB_PSIZE_2048, gregs + GLOB_PSIZE); /* All of memsize is given to bigmac. */ sbus_writel(resource_size(&qec_op->resource[1]), gregs + GLOB_MSIZE); /* Half to the transmitter, half to the receiver. */ sbus_writel(resource_size(&qec_op->resource[1]) >> 1, gregs + GLOB_TSIZE); sbus_writel(resource_size(&qec_op->resource[1]) >> 1, gregs + GLOB_RSIZE); } #define TX_RESET_TRIES 32 #define RX_RESET_TRIES 32 static void bigmac_tx_reset(void __iomem *bregs) { int tries = TX_RESET_TRIES; sbus_writel(0, bregs + BMAC_TXCFG); /* The fifo threshold bit is read-only and does * not clear. 
-DaveM */ while ((sbus_readl(bregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FIFO)) != 0 && --tries != 0) udelay(20); if (!tries) { printk(KERN_ERR "BIGMAC: Transmitter will not reset.\n"); printk(KERN_ERR "BIGMAC: tx_cfg is %08x\n", sbus_readl(bregs + BMAC_TXCFG)); } } static void bigmac_rx_reset(void __iomem *bregs) { int tries = RX_RESET_TRIES; sbus_writel(0, bregs + BMAC_RXCFG); while (sbus_readl(bregs + BMAC_RXCFG) && --tries) udelay(20); if (!tries) { printk(KERN_ERR "BIGMAC: Receiver will not reset.\n"); printk(KERN_ERR "BIGMAC: rx_cfg is %08x\n", sbus_readl(bregs + BMAC_RXCFG)); } } /* Reset the transmitter and receiver. */ static void bigmac_stop(struct bigmac *bp) { bigmac_tx_reset(bp->bregs); bigmac_rx_reset(bp->bregs); } static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs) { struct net_device_stats *stats = &bp->dev->stats; stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR); sbus_writel(0, bregs + BMAC_RCRCECTR); stats->rx_frame_errors += sbus_readl(bregs + BMAC_UNALECTR); sbus_writel(0, bregs + BMAC_UNALECTR); stats->rx_length_errors += sbus_readl(bregs + BMAC_GLECTR); sbus_writel(0, bregs + BMAC_GLECTR); stats->tx_aborted_errors += sbus_readl(bregs + BMAC_EXCTR); stats->collisions += (sbus_readl(bregs + BMAC_EXCTR) + sbus_readl(bregs + BMAC_LTCTR)); sbus_writel(0, bregs + BMAC_EXCTR); sbus_writel(0, bregs + BMAC_LTCTR); } static void bigmac_clean_rings(struct bigmac *bp) { int i; for (i = 0; i < RX_RING_SIZE; i++) { if (bp->rx_skbs[i] != NULL) { dev_kfree_skb_any(bp->rx_skbs[i]); bp->rx_skbs[i] = NULL; } } for (i = 0; i < TX_RING_SIZE; i++) { if (bp->tx_skbs[i] != NULL) { dev_kfree_skb_any(bp->tx_skbs[i]); bp->tx_skbs[i] = NULL; } } } static void bigmac_init_rings(struct bigmac *bp, bool non_blocking) { struct bmac_init_block *bb = bp->bmac_block; int i; gfp_t gfp_flags = GFP_KERNEL; if (non_blocking) gfp_flags = GFP_ATOMIC; bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0; /* Free any skippy bufs left around in the rings. */ bigmac_clean_rings(bp); /* Now get new skbufs for the receive ring. */ for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb; skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags); if (!skb) continue; bp->rx_skbs[i] = skb; /* Because we reserve afterwards. 
*/ skb_put(skb, ETH_FRAME_LEN); skb_reserve(skb, 34); bb->be_rxd[i].rx_addr = dma_map_single(&bp->bigmac_op->dev, skb->data, RX_BUF_ALLOC_SIZE - 34, DMA_FROM_DEVICE); bb->be_rxd[i].rx_flags = (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); } for (i = 0; i < TX_RING_SIZE; i++) bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0; } #define MGMT_CLKON (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB|MGMT_PAL_DCLOCK) #define MGMT_CLKOFF (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB) static void idle_transceiver(void __iomem *tregs) { int i = 20; while (i--) { sbus_writel(MGMT_CLKOFF, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); sbus_writel(MGMT_CLKON, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); } } static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit) { if (bp->tcvr_type == internal) { bit = (bit & 1) << 3; sbus_writel(bit | (MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO), tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); sbus_writel(bit | MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); } else if (bp->tcvr_type == external) { bit = (bit & 1) << 2; sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); } else { printk(KERN_ERR "write_tcvr_bit: No transceiver type known!\n"); } } static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs) { int retval = 0; if (bp->tcvr_type == internal) { sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3; } else if (bp->tcvr_type == external) { sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2; } else { printk(KERN_ERR "read_tcvr_bit: No transceiver type known!\n"); } return retval; } static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs) { int retval = 0; if (bp->tcvr_type == internal) { sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3; sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); } else if (bp->tcvr_type == external) { sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2; sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); } else { printk(KERN_ERR "read_tcvr_bit2: No transceiver type known!\n"); } return retval; } static void put_tcvr_byte(struct bigmac *bp, void __iomem *tregs, unsigned int byte) { int shift = 4; do { write_tcvr_bit(bp, tregs, ((byte >> shift) & 1)); shift -= 1; } while (shift >= 0); } static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs, int reg, unsigned short val) { int shift; reg &= 0xff; val &= 0xffff; switch(bp->tcvr_type) { case internal: case external: break; default: printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n"); return; } idle_transceiver(tregs); write_tcvr_bit(bp, tregs, 0); write_tcvr_bit(bp, tregs, 1); write_tcvr_bit(bp, tregs, 
0); write_tcvr_bit(bp, tregs, 1); put_tcvr_byte(bp, tregs, ((bp->tcvr_type == internal) ? BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL)); put_tcvr_byte(bp, tregs, reg); write_tcvr_bit(bp, tregs, 1); write_tcvr_bit(bp, tregs, 0); shift = 15; do { write_tcvr_bit(bp, tregs, (val >> shift) & 1); shift -= 1; } while (shift >= 0); } static unsigned short bigmac_tcvr_read(struct bigmac *bp, void __iomem *tregs, int reg) { unsigned short retval = 0; reg &= 0xff; switch(bp->tcvr_type) { case internal: case external: break; default: printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n"); return 0xffff; } idle_transceiver(tregs); write_tcvr_bit(bp, tregs, 0); write_tcvr_bit(bp, tregs, 1); write_tcvr_bit(bp, tregs, 1); write_tcvr_bit(bp, tregs, 0); put_tcvr_byte(bp, tregs, ((bp->tcvr_type == internal) ? BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL)); put_tcvr_byte(bp, tregs, reg); if (bp->tcvr_type == external) { int shift = 15; (void) read_tcvr_bit2(bp, tregs); (void) read_tcvr_bit2(bp, tregs); do { int tmp; tmp = read_tcvr_bit2(bp, tregs); retval |= ((tmp & 1) << shift); shift -= 1; } while (shift >= 0); (void) read_tcvr_bit2(bp, tregs); (void) read_tcvr_bit2(bp, tregs); (void) read_tcvr_bit2(bp, tregs); } else { int shift = 15; (void) read_tcvr_bit(bp, tregs); (void) read_tcvr_bit(bp, tregs); do { int tmp; tmp = read_tcvr_bit(bp, tregs); retval |= ((tmp & 1) << shift); shift -= 1; } while (shift >= 0); (void) read_tcvr_bit(bp, tregs); (void) read_tcvr_bit(bp, tregs); (void) read_tcvr_bit(bp, tregs); } return retval; } static void bigmac_tcvr_init(struct bigmac *bp) { void __iomem *tregs = bp->tregs; u32 mpal; idle_transceiver(tregs); sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); /* Only the bit for the present transceiver (internal or * external) will stick, set them both and see what stays. */ sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); udelay(20); mpal = sbus_readl(tregs + TCVR_MPAL); if (mpal & MGMT_PAL_EXT_MDIO) { bp->tcvr_type = external; sbus_writel(~(TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE), tregs + TCVR_TPAL); sbus_readl(tregs + TCVR_TPAL); } else if (mpal & MGMT_PAL_INT_MDIO) { bp->tcvr_type = internal; sbus_writel(~(TCVR_PAL_SERIAL | TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE), tregs + TCVR_TPAL); sbus_readl(tregs + TCVR_TPAL); } else { printk(KERN_ERR "BIGMAC: AIEEE, neither internal nor " "external MDIO available!\n"); printk(KERN_ERR "BIGMAC: mgmt_pal[%08x] tcvr_pal[%08x]\n", sbus_readl(tregs + TCVR_MPAL), sbus_readl(tregs + TCVR_TPAL)); } } static int bigmac_init_hw(struct bigmac *, bool); static int try_next_permutation(struct bigmac *bp, void __iomem *tregs) { if (bp->sw_bmcr & BMCR_SPEED100) { int timeout; /* Reset the PHY. */ bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK); bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); bp->sw_bmcr = (BMCR_RESET); bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); timeout = 64; while (--timeout) { bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); if ((bp->sw_bmcr & BMCR_RESET) == 0) break; udelay(20); } if (timeout == 0) printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name); bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); /* Now we try 10baseT. */ bp->sw_bmcr &= ~(BMCR_SPEED100); bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); return 0; } /* We've tried them all. 
*/ return -1; } static void bigmac_timer(struct timer_list *t) { struct bigmac *bp = from_timer(bp, t, bigmac_timer); void __iomem *tregs = bp->tregs; int restart_timer = 0; bp->timer_ticks++; if (bp->timer_state == ltrywait) { bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR); bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); if (bp->sw_bmsr & BMSR_LSTATUS) { printk(KERN_INFO "%s: Link is now up at %s.\n", bp->dev->name, (bp->sw_bmcr & BMCR_SPEED100) ? "100baseT" : "10baseT"); bp->timer_state = asleep; restart_timer = 0; } else { if (bp->timer_ticks >= 4) { int ret; ret = try_next_permutation(bp, tregs); if (ret == -1) { printk(KERN_ERR "%s: Link down, cable problem?\n", bp->dev->name); ret = bigmac_init_hw(bp, true); if (ret) { printk(KERN_ERR "%s: Error, cannot re-init the " "BigMAC.\n", bp->dev->name); } return; } bp->timer_ticks = 0; restart_timer = 1; } else { restart_timer = 1; } } } else { /* Can't happens.... */ printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n", bp->dev->name); restart_timer = 0; bp->timer_ticks = 0; bp->timer_state = asleep; /* foo on you */ } if (restart_timer != 0) { bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */ add_timer(&bp->bigmac_timer); } } /* Well, really we just force the chip into 100baseT then * 10baseT, each time checking for a link status. */ static void bigmac_begin_auto_negotiation(struct bigmac *bp) { void __iomem *tregs = bp->tregs; int timeout; /* Grab new software copies of PHY registers. */ bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR); bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); /* Reset the PHY. */ bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK); bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); bp->sw_bmcr = (BMCR_RESET); bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); timeout = 64; while (--timeout) { bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); if ((bp->sw_bmcr & BMCR_RESET) == 0) break; udelay(20); } if (timeout == 0) printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name); bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); /* First we try 100baseT. */ bp->sw_bmcr |= BMCR_SPEED100; bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); bp->timer_state = ltrywait; bp->timer_ticks = 0; bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10; add_timer(&bp->bigmac_timer); } static int bigmac_init_hw(struct bigmac *bp, bool non_blocking) { void __iomem *gregs = bp->gregs; void __iomem *cregs = bp->creg; void __iomem *bregs = bp->bregs; __u32 bblk_dvma = (__u32)bp->bblock_dvma; const unsigned char *e = &bp->dev->dev_addr[0]; /* Latch current counters into statistics. */ bigmac_get_counters(bp, bregs); /* Reset QEC. */ qec_global_reset(gregs); /* Init QEC. */ qec_init(bp); /* Alloc and reset the tx/rx descriptor chains. */ bigmac_init_rings(bp, non_blocking); /* Initialize the PHY. */ bigmac_tcvr_init(bp); /* Stop transmitter and receiver. */ bigmac_stop(bp); /* Set hardware ethernet address. */ sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2); sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1); sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0); /* Clear the hash table until mc upload occurs. */ sbus_writel(0, bregs + BMAC_HTABLE3); sbus_writel(0, bregs + BMAC_HTABLE2); sbus_writel(0, bregs + BMAC_HTABLE1); sbus_writel(0, bregs + BMAC_HTABLE0); /* Enable Big Mac hash table filter. */ sbus_writel(BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_FIFO, bregs + BMAC_RXCFG); udelay(20); /* Ok, configure the Big Mac transmitter. 
*/ sbus_writel(BIGMAC_TXCFG_FIFO, bregs + BMAC_TXCFG); /* The HME docs recommend to use the 10LSB of our MAC here. */ sbus_writel(((e[5] | e[4] << 8) & 0x3ff), bregs + BMAC_RSEED); /* Enable the output drivers no matter what. */ sbus_writel(BIGMAC_XCFG_ODENABLE | BIGMAC_XCFG_RESV, bregs + BMAC_XIFCFG); /* Tell the QEC where the ring descriptors are. */ sbus_writel(bblk_dvma + bib_offset(be_rxd, 0), cregs + CREG_RXDS); sbus_writel(bblk_dvma + bib_offset(be_txd, 0), cregs + CREG_TXDS); /* Setup the FIFO pointers into QEC local memory. */ sbus_writel(0, cregs + CREG_RXRBUFPTR); sbus_writel(0, cregs + CREG_RXWBUFPTR); sbus_writel(sbus_readl(gregs + GLOB_RSIZE), cregs + CREG_TXRBUFPTR); sbus_writel(sbus_readl(gregs + GLOB_RSIZE), cregs + CREG_TXWBUFPTR); /* Tell bigmac what interrupts we don't want to hear about. */ sbus_writel(BIGMAC_IMASK_GOTFRAME | BIGMAC_IMASK_SENTFRAME, bregs + BMAC_IMASK); /* Enable the various other irq's. */ sbus_writel(0, cregs + CREG_RIMASK); sbus_writel(0, cregs + CREG_TIMASK); sbus_writel(0, cregs + CREG_QMASK); sbus_writel(0, cregs + CREG_BMASK); /* Set jam size to a reasonable default. */ sbus_writel(DEFAULT_JAMSIZE, bregs + BMAC_JSIZE); /* Clear collision counter. */ sbus_writel(0, cregs + CREG_CCNT); /* Enable transmitter and receiver. */ sbus_writel(sbus_readl(bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE, bregs + BMAC_TXCFG); sbus_writel(sbus_readl(bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE, bregs + BMAC_RXCFG); /* Ok, start detecting link speed/duplex. */ bigmac_begin_auto_negotiation(bp); /* Success. */ return 0; } /* Error interrupts get sent here. */ static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status) { printk(KERN_ERR "bigmac_is_medium_rare: "); if (qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) { if (qec_status & GLOB_STAT_ER) printk("QEC_ERROR, "); if (qec_status & GLOB_STAT_BM) printk("QEC_BMAC_ERROR, "); } if (bmac_status & CREG_STAT_ERRORS) { if (bmac_status & CREG_STAT_BERROR) printk("BMAC_ERROR, "); if (bmac_status & CREG_STAT_TXDERROR) printk("TXD_ERROR, "); if (bmac_status & CREG_STAT_TXLERR) printk("TX_LATE_ERROR, "); if (bmac_status & CREG_STAT_TXPERR) printk("TX_PARITY_ERROR, "); if (bmac_status & CREG_STAT_TXSERR) printk("TX_SBUS_ERROR, "); if (bmac_status & CREG_STAT_RXDROP) printk("RX_DROP_ERROR, "); if (bmac_status & CREG_STAT_RXSMALL) printk("RX_SMALL_ERROR, "); if (bmac_status & CREG_STAT_RXLERR) printk("RX_LATE_ERROR, "); if (bmac_status & CREG_STAT_RXPERR) printk("RX_PARITY_ERROR, "); if (bmac_status & CREG_STAT_RXSERR) printk("RX_SBUS_ERROR, "); } printk(" RESET\n"); bigmac_init_hw(bp, true); } /* BigMAC transmit complete service routines. 
*/ static void bigmac_tx(struct bigmac *bp) { struct be_txd *txbase = &bp->bmac_block->be_txd[0]; struct net_device *dev = bp->dev; int elem; spin_lock(&bp->lock); elem = bp->tx_old; DTX(("bigmac_tx: tx_old[%d] ", elem)); while (elem != bp->tx_new) { struct sk_buff *skb; struct be_txd *this = &txbase[elem]; DTX(("this(%p) [flags(%08x)addr(%08x)]", this, this->tx_flags, this->tx_addr)); if (this->tx_flags & TXD_OWN) break; skb = bp->tx_skbs[elem]; dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; dma_unmap_single(&bp->bigmac_op->dev, this->tx_addr, skb->len, DMA_TO_DEVICE); DTX(("skb(%p) ", skb)); bp->tx_skbs[elem] = NULL; dev_consume_skb_irq(skb); elem = NEXT_TX(elem); } DTX((" DONE, tx_old=%d\n", elem)); bp->tx_old = elem; if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL(bp) > 0) netif_wake_queue(bp->dev); spin_unlock(&bp->lock); } /* BigMAC receive complete service routines. */ static void bigmac_rx(struct bigmac *bp) { struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0]; struct be_rxd *this; int elem = bp->rx_new, drops = 0; u32 flags; this = &rxbase[elem]; while (!((flags = this->rx_flags) & RXD_OWN)) { struct sk_buff *skb; int len = (flags & RXD_LENGTH); /* FCS not included */ /* Check for errors. */ if (len < ETH_ZLEN) { bp->dev->stats.rx_errors++; bp->dev->stats.rx_length_errors++; drop_it: /* Return it to the BigMAC. */ bp->dev->stats.rx_dropped++; this->rx_flags = (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); goto next; } skb = bp->rx_skbs[elem]; if (len > RX_COPY_THRESHOLD) { struct sk_buff *new_skb; /* Now refill the entry, if we can. */ new_skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); if (new_skb == NULL) { drops++; goto drop_it; } dma_unmap_single(&bp->bigmac_op->dev, this->rx_addr, RX_BUF_ALLOC_SIZE - 34, DMA_FROM_DEVICE); bp->rx_skbs[elem] = new_skb; skb_put(new_skb, ETH_FRAME_LEN); skb_reserve(new_skb, 34); this->rx_addr = dma_map_single(&bp->bigmac_op->dev, new_skb->data, RX_BUF_ALLOC_SIZE - 34, DMA_FROM_DEVICE); this->rx_flags = (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); /* Trim the original skb for the netif. */ skb_trim(skb, len); } else { struct sk_buff *copy_skb = netdev_alloc_skb(bp->dev, len + 2); if (copy_skb == NULL) { drops++; goto drop_it; } skb_reserve(copy_skb, 2); skb_put(copy_skb, len); dma_sync_single_for_cpu(&bp->bigmac_op->dev, this->rx_addr, len, DMA_FROM_DEVICE); skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len); dma_sync_single_for_device(&bp->bigmac_op->dev, this->rx_addr, len, DMA_FROM_DEVICE); /* Reuse original ring buffer. */ this->rx_flags = (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); skb = copy_skb; } /* No checksums done by the BigMAC ;-( */ skb->protocol = eth_type_trans(skb, bp->dev); netif_rx(skb); bp->dev->stats.rx_packets++; bp->dev->stats.rx_bytes += len; next: elem = NEXT_RX(elem); this = &rxbase[elem]; } bp->rx_new = elem; if (drops) printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", bp->dev->name); } static irqreturn_t bigmac_interrupt(int irq, void *dev_id) { struct bigmac *bp = (struct bigmac *) dev_id; u32 qec_status, bmac_status; DIRQ(("bigmac_interrupt: ")); /* Latch status registers now. 
*/ bmac_status = sbus_readl(bp->creg + CREG_STAT); qec_status = sbus_readl(bp->gregs + GLOB_STAT); DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status)); if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) || (bmac_status & CREG_STAT_ERRORS)) bigmac_is_medium_rare(bp, qec_status, bmac_status); if (bmac_status & CREG_STAT_TXIRQ) bigmac_tx(bp); if (bmac_status & CREG_STAT_RXIRQ) bigmac_rx(bp); return IRQ_HANDLED; } static int bigmac_open(struct net_device *dev) { struct bigmac *bp = netdev_priv(dev); int ret; ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp); if (ret) { printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq); return ret; } timer_setup(&bp->bigmac_timer, bigmac_timer, 0); ret = bigmac_init_hw(bp, false); if (ret) free_irq(dev->irq, bp); return ret; } static int bigmac_close(struct net_device *dev) { struct bigmac *bp = netdev_priv(dev); del_timer(&bp->bigmac_timer); bp->timer_state = asleep; bp->timer_ticks = 0; bigmac_stop(bp); bigmac_clean_rings(bp); free_irq(dev->irq, bp); return 0; } static void bigmac_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct bigmac *bp = netdev_priv(dev); bigmac_init_hw(bp, true); netif_wake_queue(dev); } /* Put a packet on the wire. */ static netdev_tx_t bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bigmac *bp = netdev_priv(dev); int len, entry; u32 mapping; len = skb->len; mapping = dma_map_single(&bp->bigmac_op->dev, skb->data, len, DMA_TO_DEVICE); /* Avoid a race... */ spin_lock_irq(&bp->lock); entry = bp->tx_new; DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry)); bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE; bp->tx_skbs[entry] = skb; bp->bmac_block->be_txd[entry].tx_addr = mapping; bp->bmac_block->be_txd[entry].tx_flags = (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH)); bp->tx_new = NEXT_TX(entry); if (TX_BUFFS_AVAIL(bp) <= 0) netif_stop_queue(dev); spin_unlock_irq(&bp->lock); /* Get it going. */ sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL); return NETDEV_TX_OK; } static struct net_device_stats *bigmac_get_stats(struct net_device *dev) { struct bigmac *bp = netdev_priv(dev); bigmac_get_counters(bp, bp->bregs); return &dev->stats; } static void bigmac_set_multicast(struct net_device *dev) { struct bigmac *bp = netdev_priv(dev); void __iomem *bregs = bp->bregs; struct netdev_hw_addr *ha; u32 tmp, crc; /* Disable the receiver. The bit self-clears when * the operation is complete. */ tmp = sbus_readl(bregs + BMAC_RXCFG); tmp &= ~(BIGMAC_RXCFG_ENABLE); sbus_writel(tmp, bregs + BMAC_RXCFG); while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0) udelay(20); if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { sbus_writel(0xffff, bregs + BMAC_HTABLE0); sbus_writel(0xffff, bregs + BMAC_HTABLE1); sbus_writel(0xffff, bregs + BMAC_HTABLE2); sbus_writel(0xffff, bregs + BMAC_HTABLE3); } else if (dev->flags & IFF_PROMISC) { tmp = sbus_readl(bregs + BMAC_RXCFG); tmp |= BIGMAC_RXCFG_PMISC; sbus_writel(tmp, bregs + BMAC_RXCFG); } else { u16 hash_table[4] = { 0 }; netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); crc >>= 26; hash_table[crc >> 4] |= 1 << (crc & 0xf); } sbus_writel(hash_table[0], bregs + BMAC_HTABLE0); sbus_writel(hash_table[1], bregs + BMAC_HTABLE1); sbus_writel(hash_table[2], bregs + BMAC_HTABLE2); sbus_writel(hash_table[3], bregs + BMAC_HTABLE3); } /* Re-enable the receiver. 
*/ tmp = sbus_readl(bregs + BMAC_RXCFG); tmp |= BIGMAC_RXCFG_ENABLE; sbus_writel(tmp, bregs + BMAC_RXCFG); } /* Ethtool support... */ static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strscpy(info->driver, "sunbmac", sizeof(info->driver)); strscpy(info->version, "2.0", sizeof(info->version)); } static u32 bigmac_get_link(struct net_device *dev) { struct bigmac *bp = netdev_priv(dev); spin_lock_irq(&bp->lock); bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, MII_BMSR); spin_unlock_irq(&bp->lock); return (bp->sw_bmsr & BMSR_LSTATUS); } static const struct ethtool_ops bigmac_ethtool_ops = { .get_drvinfo = bigmac_get_drvinfo, .get_link = bigmac_get_link, }; static const struct net_device_ops bigmac_ops = { .ndo_open = bigmac_open, .ndo_stop = bigmac_close, .ndo_start_xmit = bigmac_start_xmit, .ndo_get_stats = bigmac_get_stats, .ndo_set_rx_mode = bigmac_set_multicast, .ndo_tx_timeout = bigmac_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int bigmac_ether_init(struct platform_device *op, struct platform_device *qec_op) { static int version_printed; struct net_device *dev; u8 bsizes, bsizes_more; struct bigmac *bp; /* Get a new device struct for this interface. */ dev = alloc_etherdev(sizeof(struct bigmac)); if (!dev) return -ENOMEM; if (version_printed++ == 0) printk(KERN_INFO "%s", version); eth_hw_addr_set(dev, idprom->id_ethaddr); /* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */ bp = netdev_priv(dev); bp->qec_op = qec_op; bp->bigmac_op = op; SET_NETDEV_DEV(dev, &op->dev); spin_lock_init(&bp->lock); /* Map in QEC global control registers. */ bp->gregs = of_ioremap(&qec_op->resource[0], 0, GLOB_REG_SIZE, "BigMAC QEC GLobal Regs"); if (!bp->gregs) { printk(KERN_ERR "BIGMAC: Cannot map QEC global registers.\n"); goto fail_and_cleanup; } /* Make sure QEC is in BigMAC mode. */ if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) { printk(KERN_ERR "BigMAC: AIEEE, QEC is not in BigMAC mode!\n"); goto fail_and_cleanup; } /* Reset the QEC. */ if (qec_global_reset(bp->gregs)) goto fail_and_cleanup; /* Get supported SBUS burst sizes. */ bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff); bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff); bsizes &= 0xff; if (bsizes_more != 0xff) bsizes &= bsizes_more; if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 || (bsizes & DMA_BURST32) == 0) bsizes = (DMA_BURST32 - 1); bp->bigmac_bursts = bsizes; /* Perform QEC initialization. */ qec_init(bp); /* Map in the BigMAC channel registers. */ bp->creg = of_ioremap(&op->resource[0], 0, CREG_REG_SIZE, "BigMAC QEC Channel Regs"); if (!bp->creg) { printk(KERN_ERR "BIGMAC: Cannot map QEC channel registers.\n"); goto fail_and_cleanup; } /* Map in the BigMAC control registers. */ bp->bregs = of_ioremap(&op->resource[1], 0, BMAC_REG_SIZE, "BigMAC Primary Regs"); if (!bp->bregs) { printk(KERN_ERR "BIGMAC: Cannot map BigMAC primary registers.\n"); goto fail_and_cleanup; } /* Map in the BigMAC transceiver registers, this is how you poke at * the BigMAC's PHY. */ bp->tregs = of_ioremap(&op->resource[2], 0, TCVR_REG_SIZE, "BigMAC Transceiver Regs"); if (!bp->tregs) { printk(KERN_ERR "BIGMAC: Cannot map BigMAC transceiver registers.\n"); goto fail_and_cleanup; } /* Stop the BigMAC. */ bigmac_stop(bp); /* Allocate transmit/receive descriptor DVMA block. 
*/ bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev, PAGE_SIZE, &bp->bblock_dvma, GFP_ATOMIC); if (bp->bmac_block == NULL || bp->bblock_dvma == 0) goto fail_and_cleanup; /* Get the board revision of this BigMAC. */ bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node, "board-version", 1); /* Init auto-negotiation timer state. */ timer_setup(&bp->bigmac_timer, bigmac_timer, 0); bp->timer_state = asleep; bp->timer_ticks = 0; /* Backlink to generic net device struct. */ bp->dev = dev; /* Set links to our BigMAC open and close routines. */ dev->ethtool_ops = &bigmac_ethtool_ops; dev->netdev_ops = &bigmac_ops; dev->watchdog_timeo = 5*HZ; /* Finish net device registration. */ dev->irq = bp->bigmac_op->archdata.irqs[0]; dev->dma = 0; if (register_netdev(dev)) { printk(KERN_ERR "BIGMAC: Cannot register device.\n"); goto fail_and_cleanup; } dev_set_drvdata(&bp->bigmac_op->dev, bp); printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %pM\n", dev->name, dev->dev_addr); return 0; fail_and_cleanup: /* Something went wrong, undo whatever we did so far. */ /* Free register mappings if any. */ if (bp->gregs) of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); if (bp->creg) of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); if (bp->bregs) of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); if (bp->tregs) of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); if (bp->bmac_block) dma_free_coherent(&bp->bigmac_op->dev, PAGE_SIZE, bp->bmac_block, bp->bblock_dvma); /* This also frees the co-located private data */ free_netdev(dev); return -ENODEV; } /* QEC can be the parent of either QuadEthernet or a BigMAC. We want * the latter. */ static int bigmac_sbus_probe(struct platform_device *op) { struct device *parent = op->dev.parent; struct platform_device *qec_op; qec_op = to_platform_device(parent); return bigmac_ether_init(op, qec_op); } static int bigmac_sbus_remove(struct platform_device *op) { struct bigmac *bp = platform_get_drvdata(op); struct device *parent = op->dev.parent; struct net_device *net_dev = bp->dev; struct platform_device *qec_op; qec_op = to_platform_device(parent); unregister_netdev(net_dev); of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); dma_free_coherent(&op->dev, PAGE_SIZE, bp->bmac_block, bp->bblock_dvma); free_netdev(net_dev); return 0; } static const struct of_device_id bigmac_sbus_match[] = { { .name = "be", }, {}, }; MODULE_DEVICE_TABLE(of, bigmac_sbus_match); static struct platform_driver bigmac_sbus_driver = { .driver = { .name = "sunbmac", .of_match_table = bigmac_sbus_match, }, .probe = bigmac_sbus_probe, .remove = bigmac_sbus_remove, }; module_platform_driver(bigmac_sbus_driver);
linux-master
drivers/net/ethernet/sun/sunbmac.c
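bigmac_set_multicast() above programs the 64-bit multicast filter by hashing each address with ether_crc_le(), keeping the top six bits of the CRC, and using them to select one bit spread across the four 16-bit BMAC_HTABLE registers. The userspace sketch below reproduces just that indexing; its ether_crc_le() is a local stand-in written to match the kernel helper's LSB-first CRC-32, and the sample multicast address is arbitrary.

/* Userspace sketch of the multicast hash indexing used by
 * bigmac_set_multicast(): top six CRC bits -> HTABLE word and bit.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Local stand-in for the kernel's ether_crc_le() (LSB-first CRC-32). */
static uint32_t ether_crc_le(size_t len, const unsigned char *data)
{
	uint32_t crc = ~0u;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		unsigned char byte = data[i];

		for (bit = 0; bit < 8; bit++, byte >>= 1)
			crc = (crc >> 1) ^
			      (((crc ^ byte) & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	/* Illustrative multicast address, not from real hardware. */
	unsigned char addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint16_t hash_table[4] = { 0 };
	uint32_t crc = ether_crc_le(6, addr) >> 26;	/* keep top 6 bits */

	/* Word 0..3 selects HTABLE0..3, low nibble selects the bit. */
	hash_table[crc >> 4] |= 1 << (crc & 0xf);

	printf("crc index %u -> HTABLE%u bit %u (0x%04x)\n",
	       (unsigned int)crc, (unsigned int)(crc >> 4),
	       (unsigned int)(crc & 0xf),
	       (unsigned int)hash_table[crc >> 4]);
	return 0;
}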
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause /* ADIN1110 Low Power 10BASE-T1L Ethernet MAC-PHY * ADIN2111 2-Port Ethernet Switch with Integrated 10BASE-T1L PHY * * Copyright 2021 Analog Devices Inc. */ #include <linux/bitfield.h> #include <linux/bits.h> #include <linux/cache.h> #include <linux/crc8.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/if_bridge.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/mii.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/regulator/consumer.h> #include <linux/phy.h> #include <linux/property.h> #include <linux/spi/spi.h> #include <net/switchdev.h> #include <asm/unaligned.h> #define ADIN1110_PHY_ID 0x1 #define ADIN1110_RESET 0x03 #define ADIN1110_SWRESET BIT(0) #define ADIN1110_CONFIG1 0x04 #define ADIN1110_CONFIG1_SYNC BIT(15) #define ADIN1110_CONFIG2 0x06 #define ADIN2111_P2_FWD_UNK2HOST BIT(12) #define ADIN2111_PORT_CUT_THRU_EN BIT(11) #define ADIN1110_CRC_APPEND BIT(5) #define ADIN1110_FWD_UNK2HOST BIT(2) #define ADIN1110_STATUS0 0x08 #define ADIN1110_STATUS1 0x09 #define ADIN2111_P2_RX_RDY BIT(17) #define ADIN1110_SPI_ERR BIT(10) #define ADIN1110_RX_RDY BIT(4) #define ADIN1110_IMASK1 0x0D #define ADIN2111_RX_RDY_IRQ BIT(17) #define ADIN1110_SPI_ERR_IRQ BIT(10) #define ADIN1110_RX_RDY_IRQ BIT(4) #define ADIN1110_TX_RDY_IRQ BIT(3) #define ADIN1110_MDIOACC 0x20 #define ADIN1110_MDIO_TRDONE BIT(31) #define ADIN1110_MDIO_ST GENMASK(29, 28) #define ADIN1110_MDIO_OP GENMASK(27, 26) #define ADIN1110_MDIO_PRTAD GENMASK(25, 21) #define ADIN1110_MDIO_DEVAD GENMASK(20, 16) #define ADIN1110_MDIO_DATA GENMASK(15, 0) #define ADIN1110_TX_FSIZE 0x30 #define ADIN1110_TX 0x31 #define ADIN1110_TX_SPACE 0x32 #define ADIN1110_MAC_ADDR_FILTER_UPR 0x50 #define ADIN2111_MAC_ADDR_APPLY2PORT2 BIT(31) #define ADIN1110_MAC_ADDR_APPLY2PORT BIT(30) #define ADIN2111_MAC_ADDR_TO_OTHER_PORT BIT(17) #define ADIN1110_MAC_ADDR_TO_HOST BIT(16) #define ADIN1110_MAC_ADDR_FILTER_LWR 0x51 #define ADIN1110_MAC_ADDR_MASK_UPR 0x70 #define ADIN1110_MAC_ADDR_MASK_LWR 0x71 #define ADIN1110_RX_FSIZE 0x90 #define ADIN1110_RX 0x91 #define ADIN2111_RX_P2_FSIZE 0xC0 #define ADIN2111_RX_P2 0xC1 #define ADIN1110_CLEAR_STATUS0 0xFFF /* MDIO_OP codes */ #define ADIN1110_MDIO_OP_WR 0x1 #define ADIN1110_MDIO_OP_RD 0x3 #define ADIN1110_CD BIT(7) #define ADIN1110_WRITE BIT(5) #define ADIN1110_MAX_BUFF 2048 #define ADIN1110_MAX_FRAMES_READ 64 #define ADIN1110_WR_HEADER_LEN 2 #define ADIN1110_FRAME_HEADER_LEN 2 #define ADIN1110_INTERNAL_SIZE_HEADER_LEN 2 #define ADIN1110_RD_HEADER_LEN 3 #define ADIN1110_REG_LEN 4 #define ADIN1110_FEC_LEN 4 #define ADIN1110_PHY_ID_VAL 0x0283BC91 #define ADIN2111_PHY_ID_VAL 0x0283BCA1 #define ADIN_MAC_MAX_PORTS 2 #define ADIN_MAC_MAX_ADDR_SLOTS 16 #define ADIN_MAC_MULTICAST_ADDR_SLOT 0 #define ADIN_MAC_BROADCAST_ADDR_SLOT 1 #define ADIN_MAC_P1_ADDR_SLOT 2 #define ADIN_MAC_P2_ADDR_SLOT 3 #define ADIN_MAC_FDB_ADDR_SLOT 4 DECLARE_CRC8_TABLE(adin1110_crc_table); enum adin1110_chips_id { ADIN1110_MAC = 0, ADIN2111_MAC, }; struct adin1110_cfg { enum adin1110_chips_id id; char name[MDIO_NAME_SIZE]; u32 phy_ids[PHY_MAX_ADDR]; u32 ports_nr; u32 phy_id_val; }; struct adin1110_port_priv { struct adin1110_priv *priv; struct net_device *netdev; struct net_device *bridge; struct phy_device *phydev; struct work_struct tx_work; u64 rx_packets; u64 tx_packets; u64 rx_bytes; u64 tx_bytes; struct work_struct rx_mode_work; u32 flags; struct sk_buff_head txq; u32 nr; u32 
state; struct adin1110_cfg *cfg; }; struct adin1110_priv { struct mutex lock; /* protect spi */ spinlock_t state_lock; /* protect RX mode */ struct mii_bus *mii_bus; struct spi_device *spidev; bool append_crc; struct adin1110_cfg *cfg; u32 tx_space; u32 irq_mask; bool forwarding; int irq; struct adin1110_port_priv *ports[ADIN_MAC_MAX_PORTS]; char mii_bus_name[MII_BUS_ID_SIZE]; u8 data[ADIN1110_MAX_BUFF] ____cacheline_aligned; }; struct adin1110_switchdev_event_work { struct work_struct work; struct switchdev_notifier_fdb_info fdb_info; struct adin1110_port_priv *port_priv; unsigned long event; }; static struct adin1110_cfg adin1110_cfgs[] = { { .id = ADIN1110_MAC, .name = "adin1110", .phy_ids = {1}, .ports_nr = 1, .phy_id_val = ADIN1110_PHY_ID_VAL, }, { .id = ADIN2111_MAC, .name = "adin2111", .phy_ids = {1, 2}, .ports_nr = 2, .phy_id_val = ADIN2111_PHY_ID_VAL, }, }; static u8 adin1110_crc_data(u8 *data, u32 len) { return crc8(adin1110_crc_table, data, len, 0); } static int adin1110_read_reg(struct adin1110_priv *priv, u16 reg, u32 *val) { u32 header_len = ADIN1110_RD_HEADER_LEN; u32 read_len = ADIN1110_REG_LEN; struct spi_transfer t = {0}; int ret; priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg); priv->data[1] = FIELD_GET(GENMASK(7, 0), reg); priv->data[2] = 0x00; if (priv->append_crc) { priv->data[2] = adin1110_crc_data(&priv->data[0], 2); priv->data[3] = 0x00; header_len++; } if (priv->append_crc) read_len++; memset(&priv->data[header_len], 0, read_len); t.tx_buf = &priv->data[0]; t.rx_buf = &priv->data[0]; t.len = read_len + header_len; ret = spi_sync_transfer(priv->spidev, &t, 1); if (ret) return ret; if (priv->append_crc) { u8 recv_crc; u8 crc; crc = adin1110_crc_data(&priv->data[header_len], ADIN1110_REG_LEN); recv_crc = priv->data[header_len + ADIN1110_REG_LEN]; if (crc != recv_crc) { dev_err_ratelimited(&priv->spidev->dev, "CRC error."); return -EBADMSG; } } *val = get_unaligned_be32(&priv->data[header_len]); return ret; } static int adin1110_write_reg(struct adin1110_priv *priv, u16 reg, u32 val) { u32 header_len = ADIN1110_WR_HEADER_LEN; u32 write_len = ADIN1110_REG_LEN; priv->data[0] = ADIN1110_CD | ADIN1110_WRITE | FIELD_GET(GENMASK(12, 8), reg); priv->data[1] = FIELD_GET(GENMASK(7, 0), reg); if (priv->append_crc) { priv->data[2] = adin1110_crc_data(&priv->data[0], header_len); header_len++; } put_unaligned_be32(val, &priv->data[header_len]); if (priv->append_crc) { priv->data[header_len + write_len] = adin1110_crc_data(&priv->data[header_len], write_len); write_len++; } return spi_write(priv->spidev, &priv->data[0], header_len + write_len); } static int adin1110_set_bits(struct adin1110_priv *priv, u16 reg, unsigned long mask, unsigned long val) { u32 write_val; int ret; ret = adin1110_read_reg(priv, reg, &write_val); if (ret < 0) return ret; set_mask_bits(&write_val, mask, val); return adin1110_write_reg(priv, reg, write_val); } static int adin1110_round_len(int len) { /* can read/write only mutiples of 4 bytes of payload */ len = ALIGN(len, 4); /* NOTE: ADIN1110_WR_HEADER_LEN should be used for write ops. 
*/ if (len + ADIN1110_RD_HEADER_LEN > ADIN1110_MAX_BUFF) return -EINVAL; return len; } static int adin1110_read_fifo(struct adin1110_port_priv *port_priv) { struct adin1110_priv *priv = port_priv->priv; u32 header_len = ADIN1110_RD_HEADER_LEN; struct spi_transfer t; u32 frame_size_no_fcs; struct sk_buff *rxb; u32 frame_size; int round_len; u16 reg; int ret; if (!port_priv->nr) { reg = ADIN1110_RX; ret = adin1110_read_reg(priv, ADIN1110_RX_FSIZE, &frame_size); } else { reg = ADIN2111_RX_P2; ret = adin1110_read_reg(priv, ADIN2111_RX_P2_FSIZE, &frame_size); } if (ret < 0) return ret; /* The read frame size includes the extra 2 bytes * from the ADIN1110 frame header. */ if (frame_size < ADIN1110_FRAME_HEADER_LEN + ADIN1110_FEC_LEN) return ret; round_len = adin1110_round_len(frame_size); if (round_len < 0) return ret; frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN; memset(priv->data, 0, ADIN1110_RD_HEADER_LEN); priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg); priv->data[1] = FIELD_GET(GENMASK(7, 0), reg); if (priv->append_crc) { priv->data[2] = adin1110_crc_data(&priv->data[0], 2); header_len++; } rxb = netdev_alloc_skb(port_priv->netdev, round_len + header_len); if (!rxb) return -ENOMEM; skb_put(rxb, frame_size_no_fcs + header_len + ADIN1110_FRAME_HEADER_LEN); t.tx_buf = &priv->data[0]; t.rx_buf = &rxb->data[0]; t.len = header_len + round_len; ret = spi_sync_transfer(priv->spidev, &t, 1); if (ret) { kfree_skb(rxb); return ret; } skb_pull(rxb, header_len + ADIN1110_FRAME_HEADER_LEN); rxb->protocol = eth_type_trans(rxb, port_priv->netdev); if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) || (port_priv->flags & IFF_BROADCAST && rxb->pkt_type == PACKET_BROADCAST)) rxb->offload_fwd_mark = port_priv->priv->forwarding; netif_rx(rxb); port_priv->rx_bytes += frame_size - ADIN1110_FRAME_HEADER_LEN; port_priv->rx_packets++; return 0; } static int adin1110_write_fifo(struct adin1110_port_priv *port_priv, struct sk_buff *txb) { struct adin1110_priv *priv = port_priv->priv; u32 header_len = ADIN1110_WR_HEADER_LEN; __be16 frame_header; int padding = 0; int padded_len; int round_len; int ret; /* Pad frame to 64 byte length, * MAC nor PHY will otherwise add the * required padding. * The FEC will be added by the MAC internally. 
*/ if (txb->len + ADIN1110_FEC_LEN < 64) padding = 64 - (txb->len + ADIN1110_FEC_LEN); padded_len = txb->len + padding + ADIN1110_FRAME_HEADER_LEN; round_len = adin1110_round_len(padded_len); if (round_len < 0) return round_len; ret = adin1110_write_reg(priv, ADIN1110_TX_FSIZE, padded_len); if (ret < 0) return ret; memset(priv->data, 0, round_len + ADIN1110_WR_HEADER_LEN); priv->data[0] = ADIN1110_CD | ADIN1110_WRITE; priv->data[0] |= FIELD_GET(GENMASK(12, 8), ADIN1110_TX); priv->data[1] = FIELD_GET(GENMASK(7, 0), ADIN1110_TX); if (priv->append_crc) { priv->data[2] = adin1110_crc_data(&priv->data[0], 2); header_len++; } /* mention the port on which to send the frame in the frame header */ frame_header = cpu_to_be16(port_priv->nr); memcpy(&priv->data[header_len], &frame_header, ADIN1110_FRAME_HEADER_LEN); memcpy(&priv->data[header_len + ADIN1110_FRAME_HEADER_LEN], txb->data, txb->len); ret = spi_write(priv->spidev, &priv->data[0], round_len + header_len); if (ret < 0) return ret; port_priv->tx_bytes += txb->len; port_priv->tx_packets++; return 0; } static int adin1110_read_mdio_acc(struct adin1110_priv *priv) { u32 val; int ret; mutex_lock(&priv->lock); ret = adin1110_read_reg(priv, ADIN1110_MDIOACC, &val); mutex_unlock(&priv->lock); if (ret < 0) return 0; return val; } static int adin1110_mdio_read(struct mii_bus *bus, int phy_id, int reg) { struct adin1110_priv *priv = bus->priv; u32 val = 0; int ret; if (mdio_phy_id_is_c45(phy_id)) return -EOPNOTSUPP; val |= FIELD_PREP(ADIN1110_MDIO_OP, ADIN1110_MDIO_OP_RD); val |= FIELD_PREP(ADIN1110_MDIO_ST, 0x1); val |= FIELD_PREP(ADIN1110_MDIO_PRTAD, phy_id); val |= FIELD_PREP(ADIN1110_MDIO_DEVAD, reg); /* write the clause 22 read command to the chip */ mutex_lock(&priv->lock); ret = adin1110_write_reg(priv, ADIN1110_MDIOACC, val); mutex_unlock(&priv->lock); if (ret < 0) return ret; /* ADIN1110_MDIO_TRDONE BIT of the ADIN1110_MDIOACC * register is set when the read is done. * After the transaction is done, ADIN1110_MDIO_DATA * bitfield of ADIN1110_MDIOACC register will contain * the requested register value. */ ret = readx_poll_timeout(adin1110_read_mdio_acc, priv, val, (val & ADIN1110_MDIO_TRDONE), 10000, 30000); if (ret < 0) return ret; return (val & ADIN1110_MDIO_DATA); } static int adin1110_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 reg_val) { struct adin1110_priv *priv = bus->priv; u32 val = 0; int ret; if (mdio_phy_id_is_c45(phy_id)) return -EOPNOTSUPP; val |= FIELD_PREP(ADIN1110_MDIO_OP, ADIN1110_MDIO_OP_WR); val |= FIELD_PREP(ADIN1110_MDIO_ST, 0x1); val |= FIELD_PREP(ADIN1110_MDIO_PRTAD, phy_id); val |= FIELD_PREP(ADIN1110_MDIO_DEVAD, reg); val |= FIELD_PREP(ADIN1110_MDIO_DATA, reg_val); /* write the clause 22 write command to the chip */ mutex_lock(&priv->lock); ret = adin1110_write_reg(priv, ADIN1110_MDIOACC, val); mutex_unlock(&priv->lock); if (ret < 0) return ret; return readx_poll_timeout(adin1110_read_mdio_acc, priv, val, (val & ADIN1110_MDIO_TRDONE), 10000, 30000); } /* ADIN1110 MAC-PHY contains an ADIN1100 PHY. * ADIN2111 MAC-PHY contains two ADIN1100 PHYs. * By registering a new MDIO bus we allow the PAL to discover * the encapsulated PHY and probe the ADIN1100 driver. 
*/ static int adin1110_register_mdiobus(struct adin1110_priv *priv, struct device *dev) { struct mii_bus *mii_bus; int ret; mii_bus = devm_mdiobus_alloc(dev); if (!mii_bus) return -ENOMEM; snprintf(priv->mii_bus_name, MII_BUS_ID_SIZE, "%s-%u", priv->cfg->name, spi_get_chipselect(priv->spidev, 0)); mii_bus->name = priv->mii_bus_name; mii_bus->read = adin1110_mdio_read; mii_bus->write = adin1110_mdio_write; mii_bus->priv = priv; mii_bus->parent = dev; mii_bus->phy_mask = ~((u32)GENMASK(2, 0)); snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); ret = devm_mdiobus_register(dev, mii_bus); if (ret) return ret; priv->mii_bus = mii_bus; return 0; } static bool adin1110_port_rx_ready(struct adin1110_port_priv *port_priv, u32 status) { if (!netif_oper_up(port_priv->netdev)) return false; if (!port_priv->nr) return !!(status & ADIN1110_RX_RDY); else return !!(status & ADIN2111_P2_RX_RDY); } static void adin1110_read_frames(struct adin1110_port_priv *port_priv, unsigned int budget) { struct adin1110_priv *priv = port_priv->priv; u32 status1; int ret; while (budget) { ret = adin1110_read_reg(priv, ADIN1110_STATUS1, &status1); if (ret < 0) return; if (!adin1110_port_rx_ready(port_priv, status1)) break; ret = adin1110_read_fifo(port_priv); if (ret < 0) return; budget--; } } static void adin1110_wake_queues(struct adin1110_priv *priv) { int i; for (i = 0; i < priv->cfg->ports_nr; i++) netif_wake_queue(priv->ports[i]->netdev); } static irqreturn_t adin1110_irq(int irq, void *p) { struct adin1110_priv *priv = p; u32 status1; u32 val; int ret; int i; mutex_lock(&priv->lock); ret = adin1110_read_reg(priv, ADIN1110_STATUS1, &status1); if (ret < 0) goto out; if (priv->append_crc && (status1 & ADIN1110_SPI_ERR)) dev_warn_ratelimited(&priv->spidev->dev, "SPI CRC error on write.\n"); ret = adin1110_read_reg(priv, ADIN1110_TX_SPACE, &val); if (ret < 0) goto out; /* TX FIFO space is expressed in half-words */ priv->tx_space = 2 * val; for (i = 0; i < priv->cfg->ports_nr; i++) { if (adin1110_port_rx_ready(priv->ports[i], status1)) adin1110_read_frames(priv->ports[i], ADIN1110_MAX_FRAMES_READ); } /* clear IRQ sources */ adin1110_write_reg(priv, ADIN1110_STATUS0, ADIN1110_CLEAR_STATUS0); adin1110_write_reg(priv, ADIN1110_STATUS1, priv->irq_mask); out: mutex_unlock(&priv->lock); if (priv->tx_space > 0 && ret >= 0) adin1110_wake_queues(priv); return IRQ_HANDLED; } /* ADIN1110 can filter up to 16 MAC addresses, mac_nr here is the slot used */ static int adin1110_write_mac_address(struct adin1110_port_priv *port_priv, int mac_nr, const u8 *addr, u8 *mask, u32 port_rules) { struct adin1110_priv *priv = port_priv->priv; u32 offset = mac_nr * 2; u32 port_rules_mask; int ret; u32 val; if (!port_priv->nr) port_rules_mask = ADIN1110_MAC_ADDR_APPLY2PORT; else port_rules_mask = ADIN2111_MAC_ADDR_APPLY2PORT2; if (port_rules & port_rules_mask) port_rules_mask |= ADIN1110_MAC_ADDR_TO_HOST | ADIN2111_MAC_ADDR_TO_OTHER_PORT; port_rules_mask |= GENMASK(15, 0); val = port_rules | get_unaligned_be16(&addr[0]); ret = adin1110_set_bits(priv, ADIN1110_MAC_ADDR_FILTER_UPR + offset, port_rules_mask, val); if (ret < 0) return ret; val = get_unaligned_be32(&addr[2]); ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_FILTER_LWR + offset, val); if (ret < 0) return ret; /* Only the first two MAC address slots support masking. 
*/ if (mac_nr < ADIN_MAC_P1_ADDR_SLOT) { val = get_unaligned_be16(&mask[0]); ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_MASK_UPR + offset, val); if (ret < 0) return ret; val = get_unaligned_be32(&mask[2]); return adin1110_write_reg(priv, ADIN1110_MAC_ADDR_MASK_LWR + offset, val); } return 0; } static int adin1110_clear_mac_address(struct adin1110_priv *priv, int mac_nr) { u32 offset = mac_nr * 2; int ret; ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + offset, 0); if (ret < 0) return ret; ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_FILTER_LWR + offset, 0); if (ret < 0) return ret; /* only the first two MAC address slots are maskable */ if (mac_nr <= 1) { ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_MASK_UPR + offset, 0); if (ret < 0) return ret; ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_MASK_LWR + offset, 0); } return ret; } static u32 adin1110_port_rules(struct adin1110_port_priv *port_priv, bool fw_to_host, bool fw_to_other_port) { u32 port_rules = 0; if (!port_priv->nr) port_rules |= ADIN1110_MAC_ADDR_APPLY2PORT; else port_rules |= ADIN2111_MAC_ADDR_APPLY2PORT2; if (fw_to_host) port_rules |= ADIN1110_MAC_ADDR_TO_HOST; if (fw_to_other_port && port_priv->priv->forwarding) port_rules |= ADIN2111_MAC_ADDR_TO_OTHER_PORT; return port_rules; } static int adin1110_multicast_filter(struct adin1110_port_priv *port_priv, int mac_nr, bool accept_multicast) { u8 mask[ETH_ALEN] = {0}; u8 mac[ETH_ALEN] = {0}; u32 port_rules = 0; mask[0] = BIT(0); mac[0] = BIT(0); if (accept_multicast && port_priv->state == BR_STATE_FORWARDING) port_rules = adin1110_port_rules(port_priv, true, true); return adin1110_write_mac_address(port_priv, mac_nr, mac, mask, port_rules); } static int adin1110_broadcasts_filter(struct adin1110_port_priv *port_priv, int mac_nr, bool accept_broadcast) { u32 port_rules = 0; u8 mask[ETH_ALEN]; eth_broadcast_addr(mask); if (accept_broadcast && port_priv->state == BR_STATE_FORWARDING) port_rules = adin1110_port_rules(port_priv, true, true); return adin1110_write_mac_address(port_priv, mac_nr, mask, mask, port_rules); } static int adin1110_set_mac_address(struct net_device *netdev, const unsigned char *dev_addr) { struct adin1110_port_priv *port_priv = netdev_priv(netdev); u8 mask[ETH_ALEN]; u32 port_rules; u32 mac_slot; if (!is_valid_ether_addr(dev_addr)) return -EADDRNOTAVAIL; eth_hw_addr_set(netdev, dev_addr); eth_broadcast_addr(mask); mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT; port_rules = adin1110_port_rules(port_priv, true, false); return adin1110_write_mac_address(port_priv, mac_slot, netdev->dev_addr, mask, port_rules); } static int adin1110_ndo_set_mac_address(struct net_device *netdev, void *addr) { struct sockaddr *sa = addr; int ret; ret = eth_prepare_mac_addr_change(netdev, addr); if (ret < 0) return ret; return adin1110_set_mac_address(netdev, sa->sa_data); } static int adin1110_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { if (!netif_running(netdev)) return -EINVAL; return phy_do_ioctl(netdev, rq, cmd); } static int adin1110_set_promisc_mode(struct adin1110_port_priv *port_priv, bool promisc) { struct adin1110_priv *priv = port_priv->priv; u32 mask; if (port_priv->state != BR_STATE_FORWARDING) promisc = false; if (!port_priv->nr) mask = ADIN1110_FWD_UNK2HOST; else mask = ADIN2111_P2_FWD_UNK2HOST; return adin1110_set_bits(priv, ADIN1110_CONFIG2, mask, promisc ? 
mask : 0); } static int adin1110_setup_rx_mode(struct adin1110_port_priv *port_priv) { int ret; ret = adin1110_set_promisc_mode(port_priv, !!(port_priv->flags & IFF_PROMISC)); if (ret < 0) return ret; ret = adin1110_multicast_filter(port_priv, ADIN_MAC_MULTICAST_ADDR_SLOT, !!(port_priv->flags & IFF_ALLMULTI)); if (ret < 0) return ret; ret = adin1110_broadcasts_filter(port_priv, ADIN_MAC_BROADCAST_ADDR_SLOT, !!(port_priv->flags & IFF_BROADCAST)); if (ret < 0) return ret; return adin1110_set_bits(port_priv->priv, ADIN1110_CONFIG1, ADIN1110_CONFIG1_SYNC, ADIN1110_CONFIG1_SYNC); } static bool adin1110_can_offload_forwarding(struct adin1110_priv *priv) { int i; if (priv->cfg->id != ADIN2111_MAC) return false; /* Can't enable forwarding if ports do not belong to the same bridge */ if (priv->ports[0]->bridge != priv->ports[1]->bridge || !priv->ports[0]->bridge) return false; /* Can't enable forwarding if there is a port * that has been blocked by STP. */ for (i = 0; i < priv->cfg->ports_nr; i++) { if (priv->ports[i]->state != BR_STATE_FORWARDING) return false; } return true; } static void adin1110_rx_mode_work(struct work_struct *work) { struct adin1110_port_priv *port_priv; struct adin1110_priv *priv; port_priv = container_of(work, struct adin1110_port_priv, rx_mode_work); priv = port_priv->priv; mutex_lock(&priv->lock); adin1110_setup_rx_mode(port_priv); mutex_unlock(&priv->lock); } static void adin1110_set_rx_mode(struct net_device *dev) { struct adin1110_port_priv *port_priv = netdev_priv(dev); struct adin1110_priv *priv = port_priv->priv; spin_lock(&priv->state_lock); port_priv->flags = dev->flags; schedule_work(&port_priv->rx_mode_work); spin_unlock(&priv->state_lock); } static int adin1110_net_open(struct net_device *net_dev) { struct adin1110_port_priv *port_priv = netdev_priv(net_dev); struct adin1110_priv *priv = port_priv->priv; u32 val; int ret; mutex_lock(&priv->lock); /* Configure MAC to compute and append the FCS itself. */ ret = adin1110_write_reg(priv, ADIN1110_CONFIG2, ADIN1110_CRC_APPEND); if (ret < 0) goto out; val = ADIN1110_TX_RDY_IRQ | ADIN1110_RX_RDY_IRQ | ADIN1110_SPI_ERR_IRQ; if (priv->cfg->id == ADIN2111_MAC) val |= ADIN2111_RX_RDY_IRQ; priv->irq_mask = val; ret = adin1110_write_reg(priv, ADIN1110_IMASK1, ~val); if (ret < 0) { netdev_err(net_dev, "Failed to enable chip IRQs: %d\n", ret); goto out; } ret = adin1110_read_reg(priv, ADIN1110_TX_SPACE, &val); if (ret < 0) { netdev_err(net_dev, "Failed to read TX FIFO space: %d\n", ret); goto out; } priv->tx_space = 2 * val; port_priv->state = BR_STATE_FORWARDING; ret = adin1110_set_mac_address(net_dev, net_dev->dev_addr); if (ret < 0) { netdev_err(net_dev, "Could not set MAC address: %pM, %d\n", net_dev->dev_addr, ret); goto out; } ret = adin1110_set_bits(priv, ADIN1110_CONFIG1, ADIN1110_CONFIG1_SYNC, ADIN1110_CONFIG1_SYNC); out: mutex_unlock(&priv->lock); if (ret < 0) return ret; phy_start(port_priv->phydev); netif_start_queue(net_dev); return 0; } static int adin1110_net_stop(struct net_device *net_dev) { struct adin1110_port_priv *port_priv = netdev_priv(net_dev); struct adin1110_priv *priv = port_priv->priv; u32 mask; int ret; mask = !port_priv->nr ? 
ADIN2111_RX_RDY_IRQ : ADIN1110_RX_RDY_IRQ; /* Disable RX RDY IRQs */ mutex_lock(&priv->lock); ret = adin1110_set_bits(priv, ADIN1110_IMASK1, mask, mask); mutex_unlock(&priv->lock); if (ret < 0) return ret; netif_stop_queue(port_priv->netdev); flush_work(&port_priv->tx_work); phy_stop(port_priv->phydev); return 0; } static void adin1110_tx_work(struct work_struct *work) { struct adin1110_port_priv *port_priv; struct adin1110_priv *priv; struct sk_buff *txb; int ret; port_priv = container_of(work, struct adin1110_port_priv, tx_work); priv = port_priv->priv; mutex_lock(&priv->lock); while ((txb = skb_dequeue(&port_priv->txq))) { ret = adin1110_write_fifo(port_priv, txb); if (ret < 0) dev_err_ratelimited(&priv->spidev->dev, "Frame write error: %d\n", ret); dev_kfree_skb(txb); } mutex_unlock(&priv->lock); } static netdev_tx_t adin1110_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct adin1110_port_priv *port_priv = netdev_priv(dev); struct adin1110_priv *priv = port_priv->priv; netdev_tx_t netdev_ret = NETDEV_TX_OK; u32 tx_space_needed; tx_space_needed = skb->len + ADIN1110_FRAME_HEADER_LEN + ADIN1110_INTERNAL_SIZE_HEADER_LEN; if (tx_space_needed > priv->tx_space) { netif_stop_queue(dev); netdev_ret = NETDEV_TX_BUSY; } else { priv->tx_space -= tx_space_needed; skb_queue_tail(&port_priv->txq, skb); } schedule_work(&port_priv->tx_work); return netdev_ret; } static void adin1110_ndo_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) { struct adin1110_port_priv *port_priv = netdev_priv(dev); storage->rx_packets = port_priv->rx_packets; storage->tx_packets = port_priv->tx_packets; storage->rx_bytes = port_priv->rx_bytes; storage->tx_bytes = port_priv->tx_bytes; } static int adin1110_port_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid) { struct adin1110_port_priv *port_priv = netdev_priv(dev); struct adin1110_priv *priv = port_priv->priv; ppid->id_len = strnlen(priv->mii_bus_name, MAX_PHYS_ITEM_ID_LEN); memcpy(ppid->id, priv->mii_bus_name, ppid->id_len); return 0; } static int adin1110_ndo_get_phys_port_name(struct net_device *dev, char *name, size_t len) { struct adin1110_port_priv *port_priv = netdev_priv(dev); int err; err = snprintf(name, len, "p%d", port_priv->nr); if (err >= len) return -EINVAL; return 0; } static const struct net_device_ops adin1110_netdev_ops = { .ndo_open = adin1110_net_open, .ndo_stop = adin1110_net_stop, .ndo_eth_ioctl = adin1110_ioctl, .ndo_start_xmit = adin1110_start_xmit, .ndo_set_mac_address = adin1110_ndo_set_mac_address, .ndo_set_rx_mode = adin1110_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_get_stats64 = adin1110_ndo_get_stats64, .ndo_get_port_parent_id = adin1110_port_get_port_parent_id, .ndo_get_phys_port_name = adin1110_ndo_get_phys_port_name, }; static void adin1110_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *di) { strscpy(di->driver, "ADIN1110", sizeof(di->driver)); strscpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info)); } static const struct ethtool_ops adin1110_ethtool_ops = { .get_drvinfo = adin1110_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static void adin1110_adjust_link(struct net_device *dev) { struct phy_device *phydev = dev->phydev; if (!phydev->link) phy_print_status(phydev); } /* PHY ID is stored in the MAC registers too, * check spi connection by reading it. 
*/ static int adin1110_check_spi(struct adin1110_priv *priv) { struct gpio_desc *reset_gpio; int ret; u32 val; reset_gpio = devm_gpiod_get_optional(&priv->spidev->dev, "reset", GPIOD_OUT_LOW); if (reset_gpio) { /* MISO pin is used for internal configuration, can't have * anyone else disturbing the SDO line. */ spi_bus_lock(priv->spidev->controller); gpiod_set_value(reset_gpio, 1); fsleep(10000); gpiod_set_value(reset_gpio, 0); /* Need to wait 90 ms before interacting with * the MAC after a HW reset. */ fsleep(90000); spi_bus_unlock(priv->spidev->controller); } ret = adin1110_read_reg(priv, ADIN1110_PHY_ID, &val); if (ret < 0) return ret; if (val != priv->cfg->phy_id_val) { dev_err(&priv->spidev->dev, "PHY ID expected: %x, read: %x\n", priv->cfg->phy_id_val, val); return -EIO; } return 0; } static int adin1110_hw_forwarding(struct adin1110_priv *priv, bool enable) { int ret; int i; priv->forwarding = enable; if (!priv->forwarding) { for (i = ADIN_MAC_FDB_ADDR_SLOT; i < ADIN_MAC_MAX_ADDR_SLOTS; i++) { ret = adin1110_clear_mac_address(priv, i); if (ret < 0) return ret; } } /* Forwarding is optimised when MAC runs in Cut Through mode. */ ret = adin1110_set_bits(priv, ADIN1110_CONFIG2, ADIN2111_PORT_CUT_THRU_EN, priv->forwarding ? ADIN2111_PORT_CUT_THRU_EN : 0); if (ret < 0) return ret; for (i = 0; i < priv->cfg->ports_nr; i++) { ret = adin1110_setup_rx_mode(priv->ports[i]); if (ret < 0) return ret; } return ret; } static int adin1110_port_bridge_join(struct adin1110_port_priv *port_priv, struct net_device *bridge) { struct adin1110_priv *priv = port_priv->priv; int ret; port_priv->bridge = bridge; if (adin1110_can_offload_forwarding(priv)) { mutex_lock(&priv->lock); ret = adin1110_hw_forwarding(priv, true); mutex_unlock(&priv->lock); if (ret < 0) return ret; } return adin1110_set_mac_address(port_priv->netdev, bridge->dev_addr); } static int adin1110_port_bridge_leave(struct adin1110_port_priv *port_priv, struct net_device *bridge) { struct adin1110_priv *priv = port_priv->priv; int ret; port_priv->bridge = NULL; mutex_lock(&priv->lock); ret = adin1110_hw_forwarding(priv, false); mutex_unlock(&priv->lock); return ret; } static bool adin1110_port_dev_check(const struct net_device *dev) { return dev->netdev_ops == &adin1110_netdev_ops; } static int adin1110_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct adin1110_port_priv *port_priv = netdev_priv(dev); struct netdev_notifier_changeupper_info *info = ptr; int ret = 0; if (!adin1110_port_dev_check(dev)) return NOTIFY_DONE; switch (event) { case NETDEV_CHANGEUPPER: if (netif_is_bridge_master(info->upper_dev)) { if (info->linking) ret = adin1110_port_bridge_join(port_priv, info->upper_dev); else ret = adin1110_port_bridge_leave(port_priv, info->upper_dev); } break; default: break; } return notifier_from_errno(ret); } static struct notifier_block adin1110_netdevice_nb = { .notifier_call = adin1110_netdevice_event, }; static void adin1110_disconnect_phy(void *data) { phy_disconnect(data); } static int adin1110_port_set_forwarding_state(struct adin1110_port_priv *port_priv) { struct adin1110_priv *priv = port_priv->priv; int ret; port_priv->state = BR_STATE_FORWARDING; mutex_lock(&priv->lock); ret = adin1110_set_mac_address(port_priv->netdev, port_priv->netdev->dev_addr); if (ret < 0) goto out; if (adin1110_can_offload_forwarding(priv)) ret = adin1110_hw_forwarding(priv, true); else ret = adin1110_setup_rx_mode(port_priv); out: mutex_unlock(&priv->lock); 
return ret; } static int adin1110_port_set_blocking_state(struct adin1110_port_priv *port_priv) { u8 mac[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x00}; struct adin1110_priv *priv = port_priv->priv; u8 mask[ETH_ALEN]; u32 port_rules; int mac_slot; int ret; port_priv->state = BR_STATE_BLOCKING; mutex_lock(&priv->lock); mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT; ret = adin1110_clear_mac_address(priv, mac_slot); if (ret < 0) goto out; ret = adin1110_hw_forwarding(priv, false); if (ret < 0) goto out; /* Allow only BPDUs to be passed to the CPU */ eth_broadcast_addr(mask); port_rules = adin1110_port_rules(port_priv, true, false); ret = adin1110_write_mac_address(port_priv, mac_slot, mac, mask, port_rules); out: mutex_unlock(&priv->lock); return ret; } /* ADIN1110/2111 does not have any native STP support. * Listen for bridge core state changes and * allow all frames to pass or only the BPDUs. */ static int adin1110_port_attr_stp_state_set(struct adin1110_port_priv *port_priv, u8 state) { switch (state) { case BR_STATE_FORWARDING: return adin1110_port_set_forwarding_state(port_priv); case BR_STATE_LEARNING: case BR_STATE_LISTENING: case BR_STATE_DISABLED: case BR_STATE_BLOCKING: return adin1110_port_set_blocking_state(port_priv); default: return -EINVAL; } } static int adin1110_port_attr_set(struct net_device *dev, const void *ctx, const struct switchdev_attr *attr, struct netlink_ext_ack *extack) { struct adin1110_port_priv *port_priv = netdev_priv(dev); switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_STP_STATE: return adin1110_port_attr_stp_state_set(port_priv, attr->u.stp_state); default: return -EOPNOTSUPP; } } static int adin1110_switchdev_blocking_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *netdev = switchdev_notifier_info_to_dev(ptr); int ret; if (event == SWITCHDEV_PORT_ATTR_SET) { ret = switchdev_handle_port_attr_set(netdev, ptr, adin1110_port_dev_check, adin1110_port_attr_set); return notifier_from_errno(ret); } return NOTIFY_DONE; } static struct notifier_block adin1110_switchdev_blocking_notifier = { .notifier_call = adin1110_switchdev_blocking_event, }; static void adin1110_fdb_offload_notify(struct net_device *netdev, struct switchdev_notifier_fdb_info *rcv) { struct switchdev_notifier_fdb_info info = {}; info.addr = rcv->addr; info.vid = rcv->vid; info.offloaded = true; call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, netdev, &info.info, NULL); } static int adin1110_fdb_add(struct adin1110_port_priv *port_priv, struct switchdev_notifier_fdb_info *fdb) { struct adin1110_priv *priv = port_priv->priv; struct adin1110_port_priv *other_port; u8 mask[ETH_ALEN]; u32 port_rules; int mac_nr; u32 val; int ret; netdev_dbg(port_priv->netdev, "DEBUG: %s: MACID = %pM vid = %u flags = %u %u -- port %d\n", __func__, fdb->addr, fdb->vid, fdb->added_by_user, fdb->offloaded, port_priv->nr); if (!priv->forwarding) return 0; if (fdb->is_local) return -EINVAL; /* Find free FDB slot on device. 
*/ for (mac_nr = ADIN_MAC_FDB_ADDR_SLOT; mac_nr < ADIN_MAC_MAX_ADDR_SLOTS; mac_nr++) { ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + (mac_nr * 2), &val); if (ret < 0) return ret; if (!val) break; } if (mac_nr == ADIN_MAC_MAX_ADDR_SLOTS) return -ENOMEM; other_port = priv->ports[!port_priv->nr]; port_rules = adin1110_port_rules(other_port, false, true); eth_broadcast_addr(mask); return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr, mask, port_rules); } static int adin1110_read_mac(struct adin1110_priv *priv, int mac_nr, u8 *addr) { u32 val; int ret; ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + (mac_nr * 2), &val); if (ret < 0) return ret; put_unaligned_be16(val, addr); ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_LWR + (mac_nr * 2), &val); if (ret < 0) return ret; put_unaligned_be32(val, addr + 2); return 0; } static int adin1110_fdb_del(struct adin1110_port_priv *port_priv, struct switchdev_notifier_fdb_info *fdb) { struct adin1110_priv *priv = port_priv->priv; u8 addr[ETH_ALEN]; int mac_nr; int ret; netdev_dbg(port_priv->netdev, "DEBUG: %s: MACID = %pM vid = %u flags = %u %u -- port %d\n", __func__, fdb->addr, fdb->vid, fdb->added_by_user, fdb->offloaded, port_priv->nr); if (fdb->is_local) return -EINVAL; for (mac_nr = ADIN_MAC_FDB_ADDR_SLOT; mac_nr < ADIN_MAC_MAX_ADDR_SLOTS; mac_nr++) { ret = adin1110_read_mac(priv, mac_nr, addr); if (ret < 0) return ret; if (ether_addr_equal(addr, fdb->addr)) { ret = adin1110_clear_mac_address(priv, mac_nr); if (ret < 0) return ret; } } return 0; } static void adin1110_switchdev_event_work(struct work_struct *work) { struct adin1110_switchdev_event_work *switchdev_work; struct adin1110_port_priv *port_priv; int ret; switchdev_work = container_of(work, struct adin1110_switchdev_event_work, work); port_priv = switchdev_work->port_priv; mutex_lock(&port_priv->priv->lock); switch (switchdev_work->event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: ret = adin1110_fdb_add(port_priv, &switchdev_work->fdb_info); if (!ret) adin1110_fdb_offload_notify(port_priv->netdev, &switchdev_work->fdb_info); break; case SWITCHDEV_FDB_DEL_TO_DEVICE: adin1110_fdb_del(port_priv, &switchdev_work->fdb_info); break; default: break; } mutex_unlock(&port_priv->priv->lock); kfree(switchdev_work->fdb_info.addr); kfree(switchdev_work); dev_put(port_priv->netdev); } /* called under rcu_read_lock() */ static int adin1110_switchdev_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *netdev = switchdev_notifier_info_to_dev(ptr); struct adin1110_port_priv *port_priv = netdev_priv(netdev); struct adin1110_switchdev_event_work *switchdev_work; struct switchdev_notifier_fdb_info *fdb_info = ptr; if (!adin1110_port_dev_check(netdev)) return NOTIFY_DONE; switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); if (WARN_ON(!switchdev_work)) return NOTIFY_BAD; INIT_WORK(&switchdev_work->work, adin1110_switchdev_event_work); switchdev_work->port_priv = port_priv; switchdev_work->event = event; switch (event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: case SWITCHDEV_FDB_DEL_TO_DEVICE: memcpy(&switchdev_work->fdb_info, ptr, sizeof(switchdev_work->fdb_info)); switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); if (!switchdev_work->fdb_info.addr) goto err_addr_alloc; ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, fdb_info->addr); dev_hold(netdev); break; default: kfree(switchdev_work); return NOTIFY_DONE; } queue_work(system_long_wq, &switchdev_work->work); return NOTIFY_DONE; err_addr_alloc: 
kfree(switchdev_work); return NOTIFY_BAD; } static struct notifier_block adin1110_switchdev_notifier = { .notifier_call = adin1110_switchdev_event, }; static void adin1110_unregister_notifiers(void) { unregister_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier); unregister_switchdev_notifier(&adin1110_switchdev_notifier); unregister_netdevice_notifier(&adin1110_netdevice_nb); } static int adin1110_setup_notifiers(void) { int ret; ret = register_netdevice_notifier(&adin1110_netdevice_nb); if (ret < 0) return ret; ret = register_switchdev_notifier(&adin1110_switchdev_notifier); if (ret < 0) goto err_netdev; ret = register_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier); if (ret < 0) goto err_sdev; return 0; err_sdev: unregister_switchdev_notifier(&adin1110_switchdev_notifier); err_netdev: unregister_netdevice_notifier(&adin1110_netdevice_nb); return ret; } static int adin1110_probe_netdevs(struct adin1110_priv *priv) { struct device *dev = &priv->spidev->dev; struct adin1110_port_priv *port_priv; struct net_device *netdev; int ret; int i; for (i = 0; i < priv->cfg->ports_nr; i++) { netdev = devm_alloc_etherdev(dev, sizeof(*port_priv)); if (!netdev) return -ENOMEM; port_priv = netdev_priv(netdev); port_priv->netdev = netdev; port_priv->priv = priv; port_priv->cfg = priv->cfg; port_priv->nr = i; priv->ports[i] = port_priv; SET_NETDEV_DEV(netdev, dev); ret = device_get_ethdev_address(dev, netdev); if (ret < 0) return ret; netdev->irq = priv->spidev->irq; INIT_WORK(&port_priv->tx_work, adin1110_tx_work); INIT_WORK(&port_priv->rx_mode_work, adin1110_rx_mode_work); skb_queue_head_init(&port_priv->txq); netif_carrier_off(netdev); netdev->if_port = IF_PORT_10BASET; netdev->netdev_ops = &adin1110_netdev_ops; netdev->ethtool_ops = &adin1110_ethtool_ops; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->features |= NETIF_F_NETNS_LOCAL; port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false); if (IS_ERR(port_priv->phydev)) { netdev_err(netdev, "Could not find PHY with device address: %d.\n", i); return PTR_ERR(port_priv->phydev); } port_priv->phydev = phy_connect(netdev, phydev_name(port_priv->phydev), adin1110_adjust_link, PHY_INTERFACE_MODE_INTERNAL); if (IS_ERR(port_priv->phydev)) { netdev_err(netdev, "Could not connect PHY with device address: %d.\n", i); return PTR_ERR(port_priv->phydev); } ret = devm_add_action_or_reset(dev, adin1110_disconnect_phy, port_priv->phydev); if (ret < 0) return ret; } /* ADIN1110 INT_N pin will be used to signal the host */ ret = devm_request_threaded_irq(dev, priv->spidev->irq, NULL, adin1110_irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT, dev_name(dev), priv); if (ret < 0) return ret; for (i = 0; i < priv->cfg->ports_nr; i++) { ret = devm_register_netdev(dev, priv->ports[i]->netdev); if (ret < 0) { dev_err(dev, "Failed to register network device.\n"); return ret; } } return 0; } static int adin1110_probe(struct spi_device *spi) { const struct spi_device_id *dev_id = spi_get_device_id(spi); struct device *dev = &spi->dev; struct adin1110_priv *priv; int ret; priv = devm_kzalloc(dev, sizeof(struct adin1110_priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->spidev = spi; priv->cfg = &adin1110_cfgs[dev_id->driver_data]; spi->bits_per_word = 8; spi->mode = SPI_MODE_0; mutex_init(&priv->lock); spin_lock_init(&priv->state_lock); /* use of CRC on control and data transactions is pin dependent */ priv->append_crc = device_property_read_bool(dev, "adi,spi-crc"); if (priv->append_crc) crc8_populate_msb(adin1110_crc_table, 0x7); ret = 
adin1110_check_spi(priv); if (ret < 0) { dev_err(dev, "Probe SPI Read check failed: %d\n", ret); return ret; } ret = adin1110_write_reg(priv, ADIN1110_RESET, ADIN1110_SWRESET); if (ret < 0) return ret; ret = adin1110_register_mdiobus(priv, dev); if (ret < 0) { dev_err(dev, "Could not register MDIO bus %d\n", ret); return ret; } return adin1110_probe_netdevs(priv); } static const struct of_device_id adin1110_match_table[] = { { .compatible = "adi,adin1110" }, { .compatible = "adi,adin2111" }, { } }; MODULE_DEVICE_TABLE(of, adin1110_match_table); static const struct spi_device_id adin1110_spi_id[] = { { .name = "adin1110", .driver_data = ADIN1110_MAC }, { .name = "adin2111", .driver_data = ADIN2111_MAC }, { } }; MODULE_DEVICE_TABLE(spi, adin1110_spi_id); static struct spi_driver adin1110_driver = { .driver = { .name = "adin1110", .of_match_table = adin1110_match_table, }, .probe = adin1110_probe, .id_table = adin1110_spi_id, }; static int __init adin1110_driver_init(void) { int ret; ret = adin1110_setup_notifiers(); if (ret < 0) return ret; ret = spi_register_driver(&adin1110_driver); if (ret < 0) { adin1110_unregister_notifiers(); return ret; } return 0; } static void __exit adin1110_exit(void) { adin1110_unregister_notifiers(); spi_unregister_driver(&adin1110_driver); } module_init(adin1110_driver_init); module_exit(adin1110_exit); MODULE_DESCRIPTION("ADIN1110 Network driver"); MODULE_AUTHOR("Alexandru Tachici <[email protected]>"); MODULE_LICENSE("Dual BSD/GPL");
linux-master
drivers/net/ethernet/adi/adin1110.c
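/*
 * A minimal stand-alone sketch (not taken from the driver above) of how
 * adin1110_read_reg() frames a control read on the SPI bus: a command byte
 * with the CD bit set and address bits 12:8, a second byte with address
 * bits 7:0, and an optional CRC-8 appended over those two header bytes.
 * The bitwise CRC here is intended to match the table the driver builds
 * with crc8_populate_msb(adin1110_crc_table, 0x7) and an initial value of
 * 0; the function and macro names below are hypothetical.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define SKETCH_CD	0x80u	/* control/data marker, BIT(7) like ADIN1110_CD */
#define SKETCH_WRITE	0x20u	/* read/write flag, BIT(5) like ADIN1110_WRITE */

/* Bitwise MSB-first CRC-8, polynomial x^8 + x^2 + x + 1 (0x07), init 0. */
static uint8_t sketch_crc8(const uint8_t *buf, size_t len)
{
	uint8_t crc = 0;

	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ 0x07)
					   : (uint8_t)(crc << 1);
	}
	return crc;
}

/* Build a 2- or 3-byte control read header for register reg (0..0x1FFF). */
static size_t sketch_build_read_header(uint16_t reg, int append_crc,
				       uint8_t hdr[3])
{
	hdr[0] = SKETCH_CD | ((reg >> 8) & 0x1F);	/* CD + address bits 12:8 */
	hdr[1] = reg & 0xFF;				/* address bits 7:0 */
	if (!append_crc)
		return 2;
	hdr[2] = sketch_crc8(hdr, 2);			/* CRC over the header */
	return 3;
}

int main(void)
{
	uint8_t hdr[3];
	size_t n = sketch_build_read_header(0x0004 /* CONFIG1 */, 1, hdr);

	for (size_t i = 0; i < n; i++)
		printf("%02x ", hdr[i]);
	printf("\n");
	return 0;
}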
// SPDX-License-Identifier: GPL-2.0 /* * macsonic.c * * (C) 2005 Finn Thain * * Converted to DMA API, converted to unified driver model, made it work as * a module again, and from the mac68k project, introduced more 32-bit cards * and dhd's support for 16-bit cards. * * (C) 1998 Alan Cox * * Debugging Andreas Ehliar, Michael Schmitz * * Based on code * (C) 1996 by Thomas Bogendoerfer ([email protected]) * * This driver is based on work from Andreas Busse, but most of * the code is rewritten. * * (C) 1995 by Andreas Busse ([email protected]) * * A driver for the Mac onboard Sonic ethernet chip. * * 98/12/21 MSch: judged from tests on Q800, it's basically working, * but eating up both receive and transmit resources * and duplicating packets. Needs more testing. * * 99/01/03 MSch: upgraded to version 0.92 of the core driver, fixed. * * 00/10/31 [email protected]: Updated driver for 2.4 kernels, fixed problems * on centris. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/gfp.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/nubus.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/bitrev.h> #include <linux/slab.h> #include <linux/pgtable.h> #include <asm/io.h> #include <asm/hwtest.h> #include <asm/dma.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_via.h> #include "sonic.h" /* These should basically be bus-size and endian independent (since the SONIC is at least smart enough that it uses the same endianness as the host, unlike certain less enlightened Macintosh NICs) */ #define SONIC_READ(reg) (nubus_readw(dev->base_addr + (reg * 4) \ + lp->reg_offset)) #define SONIC_WRITE(reg,val) (nubus_writew(val, dev->base_addr + (reg * 4) \ + lp->reg_offset)) /* For onboard SONIC */ #define ONBOARD_SONIC_REGISTERS 0x50F0A000 #define ONBOARD_SONIC_PROM_BASE 0x50f08000 enum macsonic_type { MACSONIC_DUODOCK, MACSONIC_APPLE, MACSONIC_APPLE16, MACSONIC_DAYNA, MACSONIC_DAYNALINK }; /* For the built-in SONIC in the Duo Dock */ #define DUODOCK_SONIC_REGISTERS 0xe10000 #define DUODOCK_SONIC_PROM_BASE 0xe12000 /* For Apple-style NuBus SONIC */ #define APPLE_SONIC_REGISTERS 0 #define APPLE_SONIC_PROM_BASE 0x40000 /* Daynalink LC SONIC */ #define DAYNALINK_PROM_BASE 0x400000 /* For Dayna-style NuBus SONIC (haven't seen one yet) */ #define DAYNA_SONIC_REGISTERS 0x180000 /* This is what OpenBSD says. However, this is definitely in NuBus ROM space so we should be able to get it by walking the NuBus resource directories */ #define DAYNA_SONIC_MAC_ADDR 0xffe004 #define SONIC_READ_PROM(addr) nubus_readb(prom_addr+addr) /* * For reversing the PROM address */ static inline void bit_reverse_addr(unsigned char addr[6]) { int i; for(i = 0; i < 6; i++) addr[i] = bitrev8(addr[i]); } static int macsonic_open(struct net_device* dev) { int retval; retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev); if (retval) { printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); goto err; } /* Under the A/UX interrupt scheme, the onboard SONIC interrupt gets * moved from level 2 to level 3. Unfortunately we still get some * level 2 interrupts so register the handler for both. 
*/ if (dev->irq == IRQ_AUTO_3) { retval = request_irq(IRQ_NUBUS_9, sonic_interrupt, 0, "sonic", dev); if (retval) { printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, IRQ_NUBUS_9); goto err_irq; } } retval = sonic_open(dev); if (retval) goto err_irq_nubus; return 0; err_irq_nubus: if (dev->irq == IRQ_AUTO_3) free_irq(IRQ_NUBUS_9, dev); err_irq: free_irq(dev->irq, dev); err: return retval; } static int macsonic_close(struct net_device* dev) { int err; err = sonic_close(dev); free_irq(dev->irq, dev); if (dev->irq == IRQ_AUTO_3) free_irq(IRQ_NUBUS_9, dev); return err; } static const struct net_device_ops macsonic_netdev_ops = { .ndo_open = macsonic_open, .ndo_stop = macsonic_close, .ndo_start_xmit = sonic_send_packet, .ndo_set_rx_mode = sonic_multicast_list, .ndo_tx_timeout = sonic_tx_timeout, .ndo_get_stats = sonic_get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, }; static int macsonic_init(struct net_device *dev) { struct sonic_local* lp = netdev_priv(dev); int err = sonic_alloc_descriptors(dev); if (err) return err; dev->netdev_ops = &macsonic_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; /* * clear tally counter */ SONIC_WRITE(SONIC_CRCT, 0xffff); SONIC_WRITE(SONIC_FAET, 0xffff); SONIC_WRITE(SONIC_MPT, 0xffff); return 0; } #define INVALID_MAC(mac) (memcmp(mac, "\x08\x00\x07", 3) && \ memcmp(mac, "\x00\xA0\x40", 3) && \ memcmp(mac, "\x00\x80\x19", 3) && \ memcmp(mac, "\x00\x05\x02", 3)) static void mac_onboard_sonic_ethernet_addr(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); const int prom_addr = ONBOARD_SONIC_PROM_BASE; unsigned short val; u8 addr[ETH_ALEN]; /* * On NuBus boards we can sometimes look in the ROM resources. * No such luck for comm-slot/onboard. * On the PowerBook 520, the PROM base address is a mystery. */ if (hwreg_present((void *)prom_addr)) { int i; for (i = 0; i < 6; i++) addr[i] = SONIC_READ_PROM(i); eth_hw_addr_set(dev, addr); if (!INVALID_MAC(dev->dev_addr)) return; /* * Most of the time, the address is bit-reversed. The NetBSD * source has a rather long and detailed historical account of * why this is so. */ bit_reverse_addr(addr); eth_hw_addr_set(dev, addr); if (!INVALID_MAC(dev->dev_addr)) return; /* * If we still have what seems to be a bogus address, we'll * look in the CAM. The top entry should be ours. */ printk(KERN_WARNING "macsonic: MAC address in PROM seems " "to be invalid, trying CAM\n"); } else { printk(KERN_WARNING "macsonic: cannot read MAC address from " "PROM, trying CAM\n"); } /* This only works if MacOS has already initialized the card. */ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); SONIC_WRITE(SONIC_CEP, 15); val = SONIC_READ(SONIC_CAP2); addr[5] = val >> 8; addr[4] = val & 0xff; val = SONIC_READ(SONIC_CAP1); addr[3] = val >> 8; addr[2] = val & 0xff; val = SONIC_READ(SONIC_CAP0); addr[1] = val >> 8; addr[0] = val & 0xff; eth_hw_addr_set(dev, addr); if (!INVALID_MAC(dev->dev_addr)) return; /* Still nonsense ... messed up someplace! 
*/ printk(KERN_WARNING "macsonic: MAC address in CAM entry 15 " "seems invalid, will use a random MAC\n"); eth_hw_addr_random(dev); } static int mac_onboard_sonic_probe(struct net_device *dev) { struct sonic_local* lp = netdev_priv(dev); int sr; bool commslot = macintosh_config->expansion_type == MAC_EXP_PDS_COMM; /* Bogus probing, on the models which may or may not have Ethernet (BTW, the Ethernet *is* always at the same address, and nothing else lives there, at least if Apple's documentation is to be believed) */ if (commslot || macintosh_config->ident == MAC_MODEL_C610) { int card_present; card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS); if (!card_present) { pr_info("Onboard/comm-slot SONIC not found\n"); return -ENODEV; } } /* Danger! My arms are flailing wildly! You *must* set lp->reg_offset * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */ dev->base_addr = ONBOARD_SONIC_REGISTERS; if (via_alt_mapping) dev->irq = IRQ_AUTO_3; else dev->irq = IRQ_NUBUS_9; /* The PowerBook's SONIC is 16 bit always. */ if (macintosh_config->ident == MAC_MODEL_PB520) { lp->reg_offset = 0; lp->dma_bitmode = SONIC_BITMODE16; } else if (commslot) { /* Some of the comm-slot cards are 16 bit. But some of them are not. The 32-bit cards use offset 2 and have known revisions, we try reading the revision register at offset 2, if we don't get a known revision we assume 16 bit at offset 0. */ lp->reg_offset = 2; lp->dma_bitmode = SONIC_BITMODE16; sr = SONIC_READ(SONIC_SR); if (sr == 0x0004 || sr == 0x0006 || sr == 0x0100 || sr == 0x0101) /* 83932 is 0x0004 or 0x0006, 83934 is 0x0100 or 0x0101 */ lp->dma_bitmode = SONIC_BITMODE32; else { lp->dma_bitmode = SONIC_BITMODE16; lp->reg_offset = 0; } } else { /* All onboard cards are at offset 2 with 32 bit DMA. */ lp->reg_offset = 2; lp->dma_bitmode = SONIC_BITMODE32; } pr_info("Onboard/comm-slot SONIC, revision 0x%04x, %d bit DMA, register offset %d\n", SONIC_READ(SONIC_SR), lp->dma_bitmode ? 32 : 16, lp->reg_offset); /* This is sometimes useful to find out how MacOS configured the card */ pr_debug("%s: DCR=0x%04x, DCR2=0x%04x\n", __func__, SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff); /* Software reset, then initialize control registers. */ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); SONIC_WRITE(SONIC_DCR, SONIC_DCR_EXBUS | SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | (lp->dma_bitmode ? SONIC_DCR_DW : 0)); /* This *must* be written back to in order to restore the * extended programmable output bits, as it may not have been * initialised since the hardware reset. */ SONIC_WRITE(SONIC_DCR2, 0); /* Clear *and* disable interrupts to be on the safe side */ SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); /* Now look for the MAC address. */ mac_onboard_sonic_ethernet_addr(dev); pr_info("SONIC ethernet @%08lx, MAC %pM, IRQ %d\n", dev->base_addr, dev->dev_addr, dev->irq); /* Shared init code */ return macsonic_init(dev); } static int mac_sonic_nubus_ethernet_addr(struct net_device *dev, unsigned long prom_addr, int id) { u8 addr[ETH_ALEN]; int i; for(i = 0; i < 6; i++) addr[i] = SONIC_READ_PROM(i); /* Some of the addresses are bit-reversed */ if (id != MACSONIC_DAYNA) bit_reverse_addr(addr); eth_hw_addr_set(dev, addr); return 0; } static int macsonic_ident(struct nubus_rsrc *fres) { if (fres->dr_hw == NUBUS_DRHW_ASANTE_LC && fres->dr_sw == NUBUS_DRSW_SONIC_LC) return MACSONIC_DAYNALINK; if (fres->dr_hw == NUBUS_DRHW_SONIC && fres->dr_sw == NUBUS_DRSW_APPLE) { /* There has to be a better way to do this... 
*/ if (strstr(fres->board->name, "DuoDock")) return MACSONIC_DUODOCK; else return MACSONIC_APPLE; } if (fres->dr_hw == NUBUS_DRHW_SMC9194 && fres->dr_sw == NUBUS_DRSW_DAYNA) return MACSONIC_DAYNA; if (fres->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC && fres->dr_sw == 0) { /* huh? */ return MACSONIC_APPLE16; } return -1; } static int mac_sonic_nubus_probe_board(struct nubus_board *board, int id, struct net_device *dev) { struct sonic_local* lp = netdev_priv(dev); unsigned long base_addr, prom_addr; u16 sonic_dcr; int reg_offset, dma_bitmode; switch (id) { case MACSONIC_DUODOCK: base_addr = board->slot_addr + DUODOCK_SONIC_REGISTERS; prom_addr = board->slot_addr + DUODOCK_SONIC_PROM_BASE; sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT0 | SONIC_DCR_RFT1 | SONIC_DCR_TFT0; reg_offset = 2; dma_bitmode = SONIC_BITMODE32; break; case MACSONIC_APPLE: base_addr = board->slot_addr + APPLE_SONIC_REGISTERS; prom_addr = board->slot_addr + APPLE_SONIC_PROM_BASE; sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0; reg_offset = 0; dma_bitmode = SONIC_BITMODE32; break; case MACSONIC_APPLE16: base_addr = board->slot_addr + APPLE_SONIC_REGISTERS; prom_addr = board->slot_addr + APPLE_SONIC_PROM_BASE; sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1 | SONIC_DCR_BMS; reg_offset = 0; dma_bitmode = SONIC_BITMODE16; break; case MACSONIC_DAYNALINK: base_addr = board->slot_addr + APPLE_SONIC_REGISTERS; prom_addr = board->slot_addr + DAYNALINK_PROM_BASE; sonic_dcr = SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1 | SONIC_DCR_BMS; reg_offset = 0; dma_bitmode = SONIC_BITMODE16; break; case MACSONIC_DAYNA: base_addr = board->slot_addr + DAYNA_SONIC_REGISTERS; prom_addr = board->slot_addr + DAYNA_SONIC_MAC_ADDR; sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1; reg_offset = 0; dma_bitmode = SONIC_BITMODE16; break; default: printk(KERN_ERR "macsonic: WTF, id is %d\n", id); return -ENODEV; } /* Danger! My arms are flailing wildly! You *must* set lp->reg_offset * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */ dev->base_addr = base_addr; lp->reg_offset = reg_offset; lp->dma_bitmode = dma_bitmode; dev->irq = SLOT2IRQ(board->slot); dev_info(&board->dev, "%s, revision 0x%04x, %d bit DMA, register offset %d\n", board->name, SONIC_READ(SONIC_SR), lp->dma_bitmode ? 32 : 16, lp->reg_offset); /* This is sometimes useful to find out how MacOS configured the card */ dev_dbg(&board->dev, "%s: DCR=0x%04x, DCR2=0x%04x\n", __func__, SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff); /* Software reset, then initialize control registers. */ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); SONIC_WRITE(SONIC_DCR, sonic_dcr | (dma_bitmode ? SONIC_DCR_DW : 0)); /* This *must* be written back to in order to restore the * extended programmable output bits, since it may not have been * initialised since the hardware reset. */ SONIC_WRITE(SONIC_DCR2, 0); /* Clear *and* disable interrupts to be on the safe side */ SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); /* Now look for the MAC address. 
*/ if (mac_sonic_nubus_ethernet_addr(dev, prom_addr, id) != 0) return -ENODEV; dev_info(&board->dev, "SONIC ethernet @%08lx, MAC %pM, IRQ %d\n", dev->base_addr, dev->dev_addr, dev->irq); /* Shared init code */ return macsonic_init(dev); } static int mac_sonic_platform_probe(struct platform_device *pdev) { struct net_device *dev; struct sonic_local *lp; int err; dev = alloc_etherdev(sizeof(struct sonic_local)); if (!dev) return -ENOMEM; lp = netdev_priv(dev); lp->device = &pdev->dev; SET_NETDEV_DEV(dev, &pdev->dev); platform_set_drvdata(pdev, dev); err = mac_onboard_sonic_probe(dev); if (err) goto out; sonic_msg_init(dev); err = register_netdev(dev); if (err) goto undo_probe; return 0; undo_probe: dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), lp->descriptors, lp->descriptors_laddr); out: free_netdev(dev); return err; } MODULE_DESCRIPTION("Macintosh SONIC ethernet driver"); MODULE_ALIAS("platform:macsonic"); #include "sonic.c" static int mac_sonic_platform_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sonic_local* lp = netdev_priv(dev); unregister_netdev(dev); dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), lp->descriptors, lp->descriptors_laddr); free_netdev(dev); return 0; } static struct platform_driver mac_sonic_platform_driver = { .probe = mac_sonic_platform_probe, .remove = mac_sonic_platform_remove, .driver = { .name = "macsonic", }, }; static int mac_sonic_nubus_probe(struct nubus_board *board) { struct net_device *ndev; struct sonic_local *lp; struct nubus_rsrc *fres; int id = -1; int err; /* The platform driver will handle a PDS or Comm Slot card (even if * it has a pseudoslot declaration ROM). */ if (macintosh_config->expansion_type == MAC_EXP_PDS_COMM) return -ENODEV; for_each_board_func_rsrc(board, fres) { if (fres->category != NUBUS_CAT_NETWORK || fres->type != NUBUS_TYPE_ETHERNET) continue; id = macsonic_ident(fres); if (id != -1) break; } if (!fres) return -ENODEV; ndev = alloc_etherdev(sizeof(struct sonic_local)); if (!ndev) return -ENOMEM; lp = netdev_priv(ndev); lp->device = &board->dev; SET_NETDEV_DEV(ndev, &board->dev); err = mac_sonic_nubus_probe_board(board, id, ndev); if (err) goto out; sonic_msg_init(ndev); err = register_netdev(ndev); if (err) goto undo_probe; nubus_set_drvdata(board, ndev); return 0; undo_probe: dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), lp->descriptors, lp->descriptors_laddr); out: free_netdev(ndev); return err; } static void mac_sonic_nubus_remove(struct nubus_board *board) { struct net_device *ndev = nubus_get_drvdata(board); struct sonic_local *lp = netdev_priv(ndev); unregister_netdev(ndev); dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), lp->descriptors, lp->descriptors_laddr); free_netdev(ndev); } static struct nubus_driver mac_sonic_nubus_driver = { .probe = mac_sonic_nubus_probe, .remove = mac_sonic_nubus_remove, .driver = { .name = "macsonic-nubus", .owner = THIS_MODULE, }, }; static int perr, nerr; static int __init mac_sonic_init(void) { perr = platform_driver_register(&mac_sonic_platform_driver); nerr = nubus_driver_register(&mac_sonic_nubus_driver); return 0; } module_init(mac_sonic_init); static void __exit mac_sonic_exit(void) { if (!perr) platform_driver_unregister(&mac_sonic_platform_driver); if (!nerr) nubus_driver_unregister(&mac_sonic_nubus_driver); } module_exit(mac_sonic_exit);
linux-master
drivers/net/ethernet/natsemi/macsonic.c
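/*
 * A minimal stand-alone sketch (not taken from the driver above) of the PROM
 * MAC address fix-up performed by mac_onboard_sonic_ethernet_addr(): the
 * address read from PROM may have every byte stored bit-reversed, so it is
 * checked against the vendor prefixes listed in INVALID_MAC() and, if it
 * looks bogus, each byte is reversed (as bitrev8() does in the kernel) and
 * checked again. The driver then falls back to the CAM and finally to a
 * random address; that part is omitted here. Function names are hypothetical.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Reverse the bit order within one byte, like the kernel's bitrev8(). */
static uint8_t sketch_bitrev8(uint8_t b)
{
	b = (uint8_t)((b & 0xF0) >> 4 | (b & 0x0F) << 4);
	b = (uint8_t)((b & 0xCC) >> 2 | (b & 0x33) << 2);
	b = (uint8_t)((b & 0xAA) >> 1 | (b & 0x55) << 1);
	return b;
}

/* Vendor prefixes accepted by INVALID_MAC() in the driver above. */
static const uint8_t sketch_ouis[][3] = {
	{ 0x08, 0x00, 0x07 }, { 0x00, 0xA0, 0x40 },
	{ 0x00, 0x80, 0x19 }, { 0x00, 0x05, 0x02 },
};

static int sketch_mac_is_valid(const uint8_t mac[6])
{
	for (size_t i = 0; i < sizeof(sketch_ouis) / sizeof(sketch_ouis[0]); i++)
		if (!memcmp(mac, sketch_ouis[i], 3))
			return 1;
	return 0;
}

/* Try the address as read, then bit-reversed; 0 means a sane prefix was found. */
static int sketch_fixup_prom_mac(uint8_t mac[6])
{
	if (sketch_mac_is_valid(mac))
		return 0;
	for (int i = 0; i < 6; i++)
		mac[i] = sketch_bitrev8(mac[i]);
	return sketch_mac_is_valid(mac) ? 0 : -1;
}

int main(void)
{
	/* 08:00:07:... stored bit-reversed in PROM reads back as 10:00:e0:... */
	uint8_t mac[6] = { 0x10, 0x00, 0xE0, 0x12, 0x34, 0x56 };

	if (!sketch_fixup_prom_mac(mac))
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}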
// SPDX-License-Identifier: GPL-2.0-or-later #define VERSION "0.23" /* ns83820.c by Benjamin LaHaise with contributions. * * Questions/comments/discussion to [email protected]. * * $Revision: 1.34.2.23 $ * * Copyright 2001 Benjamin LaHaise. * Copyright 2001, 2002 Red Hat. * * Mmmm, chocolate vanilla mocha... * * ChangeLog * ========= * 20010414 0.1 - created * 20010622 0.2 - basic rx and tx. * 20010711 0.3 - added duplex and link state detection support. * 20010713 0.4 - zero copy, no hangs. * 0.5 - 64 bit dma support (davem will hate me for this) * - disable jumbo frames to avoid tx hangs * - work around tx deadlocks on my 1.02 card via * fiddling with TXCFG * 20010810 0.6 - use pci dma api for ringbuffers, work on ia64 * 20010816 0.7 - misc cleanups * 20010826 0.8 - fix critical zero copy bugs * 0.9 - internal experiment * 20010827 0.10 - fix ia64 unaligned access. * 20010906 0.11 - accept all packets with checksum errors as * otherwise fragments get lost * - fix >> 32 bugs * 0.12 - add statistics counters * - add allmulti/promisc support * 20011009 0.13 - hotplug support, other smaller pci api cleanups * 20011204 0.13a - optical transceiver support added * by Michael Clark <[email protected]> * 20011205 0.13b - call register_netdev earlier in initialization * suppress duplicate link status messages * 20011117 0.14 - ethtool GDRVINFO, GLINK support from jgarzik * 20011204 0.15 get ppc (big endian) working * 20011218 0.16 various cleanups * 20020310 0.17 speedups * 20020610 0.18 - actually use the pci dma api for highmem * - remove pci latency register fiddling * 0.19 - better bist support * - add ihr and reset_phy parameters * - gmii bus probing * - fix missed txok introduced during performance * tuning * 0.20 - fix stupid RFEN thinko. i am such a smurf. * 20040828 0.21 - add hardware vlan accleration * by Neil Horman <[email protected]> * 20050406 0.22 - improved DAC ifdefs from Andi Kleen * - removal of dead code from Adrian Bunk * - fix half duplex collision behaviour * Driver Overview * =============== * * This driver was originally written for the National Semiconductor * 83820 chip, a 10/100/1000 Mbps 64 bit PCI ethernet NIC. Hopefully * this code will turn out to be a) clean, b) correct, and c) fast. * With that in mind, I'm aiming to split the code up as much as * reasonably possible. At present there are X major sections that * break down into a) packet receive, b) packet transmit, c) link * management, d) initialization and configuration. Where possible, * these code paths are designed to run in parallel. * * This driver has been tested and found to work with the following * cards (in no particular order): * * Cameo SOHO-GA2000T SOHO-GA2500T * D-Link DGE-500T * PureData PDP8023Z-TG * SMC SMC9452TX SMC9462TX * Netgear GA621 * * Special thanks to SMC for providing hardware to test this driver on. * * Reports of success or failure would be greatly appreciated. */ //#define dprintk printk #define dprintk(x...) do { } while (0) #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ip.h> /* for iph */ #include <linux/in.h> /* for IPPROTO_... 
*/ #include <linux/compiler.h> #include <linux/prefetch.h> #include <linux/ethtool.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/if_vlan.h> #include <linux/rtnetlink.h> #include <linux/jiffies.h> #include <linux/slab.h> #include <asm/io.h> #include <linux/uaccess.h> #define DRV_NAME "ns83820" /* Global parameters. See module_param near the bottom. */ static int ihr = 2; static int reset_phy = 0; static int lnksts = 0; /* CFG_LNKSTS bit polarity */ /* Dprintk is used for more interesting debug events */ #undef Dprintk #define Dprintk dprintk /* tunables */ #define RX_BUF_SIZE 1500 /* 8192 */ #if IS_ENABLED(CONFIG_VLAN_8021Q) #define NS83820_VLAN_ACCEL_SUPPORT #endif /* Must not exceed ~65000. */ #define NR_RX_DESC 64 #define NR_TX_DESC 128 /* not tunable */ #define REAL_RX_BUF_SIZE (RX_BUF_SIZE + 14) /* rx/tx mac addr + type */ #define MIN_TX_DESC_FREE 8 /* register defines */ #define CFGCS 0x04 #define CR_TXE 0x00000001 #define CR_TXD 0x00000002 /* Ramit : Here's a tip, don't do a RXD immediately followed by an RXE * The Receive engine skips one descriptor and moves * onto the next one!! */ #define CR_RXE 0x00000004 #define CR_RXD 0x00000008 #define CR_TXR 0x00000010 #define CR_RXR 0x00000020 #define CR_SWI 0x00000080 #define CR_RST 0x00000100 #define PTSCR_EEBIST_FAIL 0x00000001 #define PTSCR_EEBIST_EN 0x00000002 #define PTSCR_EELOAD_EN 0x00000004 #define PTSCR_RBIST_FAIL 0x000001b8 #define PTSCR_RBIST_DONE 0x00000200 #define PTSCR_RBIST_EN 0x00000400 #define PTSCR_RBIST_RST 0x00002000 #define MEAR_EEDI 0x00000001 #define MEAR_EEDO 0x00000002 #define MEAR_EECLK 0x00000004 #define MEAR_EESEL 0x00000008 #define MEAR_MDIO 0x00000010 #define MEAR_MDDIR 0x00000020 #define MEAR_MDC 0x00000040 #define ISR_TXDESC3 0x40000000 #define ISR_TXDESC2 0x20000000 #define ISR_TXDESC1 0x10000000 #define ISR_TXDESC0 0x08000000 #define ISR_RXDESC3 0x04000000 #define ISR_RXDESC2 0x02000000 #define ISR_RXDESC1 0x01000000 #define ISR_RXDESC0 0x00800000 #define ISR_TXRCMP 0x00400000 #define ISR_RXRCMP 0x00200000 #define ISR_DPERR 0x00100000 #define ISR_SSERR 0x00080000 #define ISR_RMABT 0x00040000 #define ISR_RTABT 0x00020000 #define ISR_RXSOVR 0x00010000 #define ISR_HIBINT 0x00008000 #define ISR_PHY 0x00004000 #define ISR_PME 0x00002000 #define ISR_SWI 0x00001000 #define ISR_MIB 0x00000800 #define ISR_TXURN 0x00000400 #define ISR_TXIDLE 0x00000200 #define ISR_TXERR 0x00000100 #define ISR_TXDESC 0x00000080 #define ISR_TXOK 0x00000040 #define ISR_RXORN 0x00000020 #define ISR_RXIDLE 0x00000010 #define ISR_RXEARLY 0x00000008 #define ISR_RXERR 0x00000004 #define ISR_RXDESC 0x00000002 #define ISR_RXOK 0x00000001 #define TXCFG_CSI 0x80000000 #define TXCFG_HBI 0x40000000 #define TXCFG_MLB 0x20000000 #define TXCFG_ATP 0x10000000 #define TXCFG_ECRETRY 0x00800000 #define TXCFG_BRST_DIS 0x00080000 #define TXCFG_MXDMA1024 0x00000000 #define TXCFG_MXDMA512 0x00700000 #define TXCFG_MXDMA256 0x00600000 #define TXCFG_MXDMA128 0x00500000 #define TXCFG_MXDMA64 0x00400000 #define TXCFG_MXDMA32 0x00300000 #define TXCFG_MXDMA16 0x00200000 #define TXCFG_MXDMA8 0x00100000 #define CFG_LNKSTS 0x80000000 #define CFG_SPDSTS 0x60000000 #define CFG_SPDSTS1 0x40000000 #define CFG_SPDSTS0 0x20000000 #define CFG_DUPSTS 0x10000000 #define CFG_TBI_EN 0x01000000 #define CFG_MODE_1000 0x00400000 /* Ramit : Dont' ever use AUTO_1000, it never works and is buggy. 
* Read the Phy response and then configure the MAC accordingly */ #define CFG_AUTO_1000 0x00200000 #define CFG_PINT_CTL 0x001c0000 #define CFG_PINT_DUPSTS 0x00100000 #define CFG_PINT_LNKSTS 0x00080000 #define CFG_PINT_SPDSTS 0x00040000 #define CFG_TMRTEST 0x00020000 #define CFG_MRM_DIS 0x00010000 #define CFG_MWI_DIS 0x00008000 #define CFG_T64ADDR 0x00004000 #define CFG_PCI64_DET 0x00002000 #define CFG_DATA64_EN 0x00001000 #define CFG_M64ADDR 0x00000800 #define CFG_PHY_RST 0x00000400 #define CFG_PHY_DIS 0x00000200 #define CFG_EXTSTS_EN 0x00000100 #define CFG_REQALG 0x00000080 #define CFG_SB 0x00000040 #define CFG_POW 0x00000020 #define CFG_EXD 0x00000010 #define CFG_PESEL 0x00000008 #define CFG_BROM_DIS 0x00000004 #define CFG_EXT_125 0x00000002 #define CFG_BEM 0x00000001 #define EXTSTS_UDPPKT 0x00200000 #define EXTSTS_TCPPKT 0x00080000 #define EXTSTS_IPPKT 0x00020000 #define EXTSTS_VPKT 0x00010000 #define EXTSTS_VTG_MASK 0x0000ffff #define SPDSTS_POLARITY (CFG_SPDSTS1 | CFG_SPDSTS0 | CFG_DUPSTS | (lnksts ? CFG_LNKSTS : 0)) #define MIBC_MIBS 0x00000008 #define MIBC_ACLR 0x00000004 #define MIBC_FRZ 0x00000002 #define MIBC_WRN 0x00000001 #define PCR_PSEN (1 << 31) #define PCR_PS_MCAST (1 << 30) #define PCR_PS_DA (1 << 29) #define PCR_STHI_8 (3 << 23) #define PCR_STLO_4 (1 << 23) #define PCR_FFHI_8K (3 << 21) #define PCR_FFLO_4K (1 << 21) #define PCR_PAUSE_CNT 0xFFFE #define RXCFG_AEP 0x80000000 #define RXCFG_ARP 0x40000000 #define RXCFG_STRIPCRC 0x20000000 #define RXCFG_RX_FD 0x10000000 #define RXCFG_ALP 0x08000000 #define RXCFG_AIRL 0x04000000 #define RXCFG_MXDMA512 0x00700000 #define RXCFG_DRTH 0x0000003e #define RXCFG_DRTH0 0x00000002 #define RFCR_RFEN 0x80000000 #define RFCR_AAB 0x40000000 #define RFCR_AAM 0x20000000 #define RFCR_AAU 0x10000000 #define RFCR_APM 0x08000000 #define RFCR_APAT 0x07800000 #define RFCR_APAT3 0x04000000 #define RFCR_APAT2 0x02000000 #define RFCR_APAT1 0x01000000 #define RFCR_APAT0 0x00800000 #define RFCR_AARP 0x00400000 #define RFCR_MHEN 0x00200000 #define RFCR_UHEN 0x00100000 #define RFCR_ULM 0x00080000 #define VRCR_RUDPE 0x00000080 #define VRCR_RTCPE 0x00000040 #define VRCR_RIPE 0x00000020 #define VRCR_IPEN 0x00000010 #define VRCR_DUTF 0x00000008 #define VRCR_DVTF 0x00000004 #define VRCR_VTREN 0x00000002 #define VRCR_VTDEN 0x00000001 #define VTCR_PPCHK 0x00000008 #define VTCR_GCHK 0x00000004 #define VTCR_VPPTI 0x00000002 #define VTCR_VGTI 0x00000001 #define CR 0x00 #define CFG 0x04 #define MEAR 0x08 #define PTSCR 0x0c #define ISR 0x10 #define IMR 0x14 #define IER 0x18 #define IHR 0x1c #define TXDP 0x20 #define TXDP_HI 0x24 #define TXCFG 0x28 #define GPIOR 0x2c #define RXDP 0x30 #define RXDP_HI 0x34 #define RXCFG 0x38 #define PQCR 0x3c #define WCSR 0x40 #define PCR 0x44 #define RFCR 0x48 #define RFDR 0x4c #define SRR 0x58 #define VRCR 0xbc #define VTCR 0xc0 #define VDR 0xc4 #define CCSR 0xcc #define TBICR 0xe0 #define TBISR 0xe4 #define TANAR 0xe8 #define TANLPAR 0xec #define TANER 0xf0 #define TESR 0xf4 #define TBICR_MR_AN_ENABLE 0x00001000 #define TBICR_MR_RESTART_AN 0x00000200 #define TBISR_MR_LINK_STATUS 0x00000020 #define TBISR_MR_AN_COMPLETE 0x00000004 #define TANAR_PS2 0x00000100 #define TANAR_PS1 0x00000080 #define TANAR_HALF_DUP 0x00000040 #define TANAR_FULL_DUP 0x00000020 #define GPIOR_GP5_OE 0x00000200 #define GPIOR_GP4_OE 0x00000100 #define GPIOR_GP3_OE 0x00000080 #define GPIOR_GP2_OE 0x00000040 #define GPIOR_GP1_OE 0x00000020 #define GPIOR_GP3_OUT 0x00000004 #define GPIOR_GP1_OUT 0x00000001 #define LINK_AUTONEGOTIATE 0x01 #define LINK_DOWN 0x02 
#define LINK_UP 0x04 #define HW_ADDR_LEN sizeof(dma_addr_t) #define desc_addr_set(desc, addr) \ do { \ ((desc)[0] = cpu_to_le32(addr)); \ if (HW_ADDR_LEN == 8) \ (desc)[1] = cpu_to_le32(((u64)addr) >> 32); \ } while(0) #define desc_addr_get(desc) \ (le32_to_cpu((desc)[0]) | \ (HW_ADDR_LEN == 8 ? ((dma_addr_t)le32_to_cpu((desc)[1]))<<32 : 0)) #define DESC_LINK 0 #define DESC_BUFPTR (DESC_LINK + HW_ADDR_LEN/4) #define DESC_CMDSTS (DESC_BUFPTR + HW_ADDR_LEN/4) #define DESC_EXTSTS (DESC_CMDSTS + 4/4) #define CMDSTS_OWN 0x80000000 #define CMDSTS_MORE 0x40000000 #define CMDSTS_INTR 0x20000000 #define CMDSTS_ERR 0x10000000 #define CMDSTS_OK 0x08000000 #define CMDSTS_RUNT 0x00200000 #define CMDSTS_LEN_MASK 0x0000ffff #define CMDSTS_DEST_MASK 0x01800000 #define CMDSTS_DEST_SELF 0x00800000 #define CMDSTS_DEST_MULTI 0x01000000 #define DESC_SIZE 8 /* Should be cache line sized */ struct rx_info { spinlock_t lock; int up; unsigned long idle; struct sk_buff *skbs[NR_RX_DESC]; __le32 *next_rx_desc; u16 next_rx, next_empty; __le32 *descs; dma_addr_t phy_descs; }; struct ns83820 { u8 __iomem *base; struct pci_dev *pci_dev; struct net_device *ndev; struct rx_info rx_info; struct tasklet_struct rx_tasklet; unsigned ihr; struct work_struct tq_refill; /* protects everything below. irqsave when using. */ spinlock_t misc_lock; u32 CFG_cache; u32 MEAR_cache; u32 IMR_cache; unsigned linkstate; spinlock_t tx_lock; u16 tx_done_idx; u16 tx_idx; volatile u16 tx_free_idx; /* idx of free desc chain */ u16 tx_intr_idx; atomic_t nr_tx_skbs; struct sk_buff *tx_skbs[NR_TX_DESC]; char pad[16] __attribute__((aligned(16))); __le32 *tx_descs; dma_addr_t tx_phy_descs; struct timer_list tx_watchdog; }; static inline struct ns83820 *PRIV(struct net_device *dev) { return netdev_priv(dev); } #define __kick_rx(dev) writel(CR_RXE, dev->base + CR) static inline void kick_rx(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); dprintk("kick_rx: maybe kicking\n"); if (test_and_clear_bit(0, &dev->rx_info.idle)) { dprintk("actually kicking\n"); writel(dev->rx_info.phy_descs + (4 * DESC_SIZE * dev->rx_info.next_rx), dev->base + RXDP); if (dev->rx_info.next_rx == dev->rx_info.next_empty) printk(KERN_DEBUG "%s: uh-oh: next_rx == next_empty???\n", ndev->name); __kick_rx(dev); } } //free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC #define start_tx_okay(dev) \ (((NR_TX_DESC-2 + dev->tx_done_idx - dev->tx_free_idx) % NR_TX_DESC) > MIN_TX_DESC_FREE) /* Packet Receiver * * The hardware supports linked lists of receive descriptors for * which ownership is transferred back and forth by means of an * ownership bit. While the hardware does support the use of a * ring for receive descriptors, we only make use of a chain in * an attempt to reduce bus traffic under heavy load scenarios. * This will also make bugs a bit more obvious. The current code * only makes use of a single rx chain; I hope to implement * priority based rx for version 1.0. Goal: even under overload * conditions, still route realtime traffic with as low jitter as * possible. 
*/ static inline void build_rx_desc(struct ns83820 *dev, __le32 *desc, dma_addr_t link, dma_addr_t buf, u32 cmdsts, u32 extsts) { desc_addr_set(desc + DESC_LINK, link); desc_addr_set(desc + DESC_BUFPTR, buf); desc[DESC_EXTSTS] = cpu_to_le32(extsts); mb(); desc[DESC_CMDSTS] = cpu_to_le32(cmdsts); } #define nr_rx_empty(dev) ((NR_RX_DESC-2 + dev->rx_info.next_rx - dev->rx_info.next_empty) % NR_RX_DESC) static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb) { unsigned next_empty; u32 cmdsts; __le32 *sg; dma_addr_t buf; next_empty = dev->rx_info.next_empty; /* don't overrun last rx marker */ if (unlikely(nr_rx_empty(dev) <= 2)) { kfree_skb(skb); return 1; } #if 0 dprintk("next_empty[%d] nr_used[%d] next_rx[%d]\n", dev->rx_info.next_empty, dev->rx_info.nr_used, dev->rx_info.next_rx ); #endif sg = dev->rx_info.descs + (next_empty * DESC_SIZE); BUG_ON(NULL != dev->rx_info.skbs[next_empty]); dev->rx_info.skbs[next_empty] = skb; dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC; cmdsts = REAL_RX_BUF_SIZE | CMDSTS_INTR; buf = dma_map_single(&dev->pci_dev->dev, skb->data, REAL_RX_BUF_SIZE, DMA_FROM_DEVICE); build_rx_desc(dev, sg, 0, buf, cmdsts, 0); /* update link of previous rx */ if (likely(next_empty != dev->rx_info.next_rx)) dev->rx_info.descs[((NR_RX_DESC + next_empty - 1) % NR_RX_DESC) * DESC_SIZE] = cpu_to_le32(dev->rx_info.phy_descs + (next_empty * DESC_SIZE * 4)); return 0; } static inline int rx_refill(struct net_device *ndev, gfp_t gfp) { struct ns83820 *dev = PRIV(ndev); unsigned i; unsigned long flags = 0; if (unlikely(nr_rx_empty(dev) <= 2)) return 0; dprintk("rx_refill(%p)\n", ndev); if (gfp == GFP_ATOMIC) spin_lock_irqsave(&dev->rx_info.lock, flags); for (i=0; i<NR_RX_DESC; i++) { struct sk_buff *skb; long res; /* extra 16 bytes for alignment */ skb = __netdev_alloc_skb(ndev, REAL_RX_BUF_SIZE+16, gfp); if (unlikely(!skb)) break; skb_reserve(skb, skb->data - PTR_ALIGN(skb->data, 16)); if (gfp != GFP_ATOMIC) spin_lock_irqsave(&dev->rx_info.lock, flags); res = ns83820_add_rx_skb(dev, skb); if (gfp != GFP_ATOMIC) spin_unlock_irqrestore(&dev->rx_info.lock, flags); if (res) { i = 1; break; } } if (gfp == GFP_ATOMIC) spin_unlock_irqrestore(&dev->rx_info.lock, flags); return i ? 
0 : -ENOMEM; } static void rx_refill_atomic(struct net_device *ndev) { rx_refill(ndev, GFP_ATOMIC); } /* REFILL */ static inline void queue_refill(struct work_struct *work) { struct ns83820 *dev = container_of(work, struct ns83820, tq_refill); struct net_device *ndev = dev->ndev; rx_refill(ndev, GFP_KERNEL); if (dev->rx_info.up) kick_rx(ndev); } static inline void clear_rx_desc(struct ns83820 *dev, unsigned i) { build_rx_desc(dev, dev->rx_info.descs + (DESC_SIZE * i), 0, 0, CMDSTS_OWN, 0); } static void phy_intr(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); static const char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" }; u32 cfg, new_cfg; u32 tanar, tanlpar; int speed, fullduplex, newlinkstate; cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY; if (dev->CFG_cache & CFG_TBI_EN) { u32 __maybe_unused tbisr; /* we have an optical transceiver */ tbisr = readl(dev->base + TBISR); tanar = readl(dev->base + TANAR); tanlpar = readl(dev->base + TANLPAR); dprintk("phy_intr: tbisr=%08x, tanar=%08x, tanlpar=%08x\n", tbisr, tanar, tanlpar); if ( (fullduplex = (tanlpar & TANAR_FULL_DUP) && (tanar & TANAR_FULL_DUP)) ) { /* both of us are full duplex */ writel(readl(dev->base + TXCFG) | TXCFG_CSI | TXCFG_HBI | TXCFG_ATP, dev->base + TXCFG); writel(readl(dev->base + RXCFG) | RXCFG_RX_FD, dev->base + RXCFG); /* Light up full duplex LED */ writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT, dev->base + GPIOR); } else if (((tanlpar & TANAR_HALF_DUP) && (tanar & TANAR_HALF_DUP)) || ((tanlpar & TANAR_FULL_DUP) && (tanar & TANAR_HALF_DUP)) || ((tanlpar & TANAR_HALF_DUP) && (tanar & TANAR_FULL_DUP))) { /* one or both of us are half duplex */ writel((readl(dev->base + TXCFG) & ~(TXCFG_CSI | TXCFG_HBI)) | TXCFG_ATP, dev->base + TXCFG); writel(readl(dev->base + RXCFG) & ~RXCFG_RX_FD, dev->base + RXCFG); /* Turn off full duplex LED */ writel(readl(dev->base + GPIOR) & ~GPIOR_GP1_OUT, dev->base + GPIOR); } speed = 4; /* 1000F */ } else { /* we have a copper transceiver */ new_cfg = dev->CFG_cache & ~(CFG_SB | CFG_MODE_1000 | CFG_SPDSTS); if (cfg & CFG_SPDSTS1) new_cfg |= CFG_MODE_1000; else new_cfg &= ~CFG_MODE_1000; speed = ((cfg / CFG_SPDSTS0) & 3); fullduplex = (cfg & CFG_DUPSTS); if (fullduplex) { new_cfg |= CFG_SB; writel(readl(dev->base + TXCFG) | TXCFG_CSI | TXCFG_HBI, dev->base + TXCFG); writel(readl(dev->base + RXCFG) | RXCFG_RX_FD, dev->base + RXCFG); } else { writel(readl(dev->base + TXCFG) & ~(TXCFG_CSI | TXCFG_HBI), dev->base + TXCFG); writel(readl(dev->base + RXCFG) & ~(RXCFG_RX_FD), dev->base + RXCFG); } if ((cfg & CFG_LNKSTS) && ((new_cfg ^ dev->CFG_cache) != 0)) { writel(new_cfg, dev->base + CFG); dev->CFG_cache = new_cfg; } dev->CFG_cache &= ~CFG_SPDSTS; dev->CFG_cache |= cfg & CFG_SPDSTS; } newlinkstate = (cfg & CFG_LNKSTS) ? LINK_UP : LINK_DOWN; if (newlinkstate & LINK_UP && dev->linkstate != newlinkstate) { netif_start_queue(ndev); netif_wake_queue(ndev); printk(KERN_INFO "%s: link now %s mbps, %s duplex and up.\n", ndev->name, speeds[speed], fullduplex ? 
"full" : "half"); } else if (newlinkstate & LINK_DOWN && dev->linkstate != newlinkstate) { netif_stop_queue(ndev); printk(KERN_INFO "%s: link now down.\n", ndev->name); } dev->linkstate = newlinkstate; } static int ns83820_setup_rx(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); unsigned i; int ret; dprintk("ns83820_setup_rx(%p)\n", ndev); dev->rx_info.idle = 1; dev->rx_info.next_rx = 0; dev->rx_info.next_rx_desc = dev->rx_info.descs; dev->rx_info.next_empty = 0; for (i=0; i<NR_RX_DESC; i++) clear_rx_desc(dev, i); writel(0, dev->base + RXDP_HI); writel(dev->rx_info.phy_descs, dev->base + RXDP); ret = rx_refill(ndev, GFP_KERNEL); if (!ret) { dprintk("starting receiver\n"); /* prevent the interrupt handler from stomping on us */ spin_lock_irq(&dev->rx_info.lock); writel(0x0001, dev->base + CCSR); writel(0, dev->base + RFCR); writel(0x7fc00000, dev->base + RFCR); writel(0xffc00000, dev->base + RFCR); dev->rx_info.up = 1; phy_intr(ndev); /* Okay, let it rip */ spin_lock(&dev->misc_lock); dev->IMR_cache |= ISR_PHY; dev->IMR_cache |= ISR_RXRCMP; //dev->IMR_cache |= ISR_RXERR; //dev->IMR_cache |= ISR_RXOK; dev->IMR_cache |= ISR_RXORN; dev->IMR_cache |= ISR_RXSOVR; dev->IMR_cache |= ISR_RXDESC; dev->IMR_cache |= ISR_RXIDLE; dev->IMR_cache |= ISR_TXDESC; dev->IMR_cache |= ISR_TXIDLE; writel(dev->IMR_cache, dev->base + IMR); writel(1, dev->base + IER); spin_unlock(&dev->misc_lock); kick_rx(ndev); spin_unlock_irq(&dev->rx_info.lock); } return ret; } static void ns83820_cleanup_rx(struct ns83820 *dev) { unsigned i; unsigned long flags; dprintk("ns83820_cleanup_rx(%p)\n", dev); /* disable receive interrupts */ spin_lock_irqsave(&dev->misc_lock, flags); dev->IMR_cache &= ~(ISR_RXOK | ISR_RXDESC | ISR_RXERR | ISR_RXEARLY | ISR_RXIDLE); writel(dev->IMR_cache, dev->base + IMR); spin_unlock_irqrestore(&dev->misc_lock, flags); /* synchronize with the interrupt handler and kill it */ dev->rx_info.up = 0; synchronize_irq(dev->pci_dev->irq); /* touch the pci bus... 
*/ readl(dev->base + IMR); /* assumes the transmitter is already disabled and reset */ writel(0, dev->base + RXDP_HI); writel(0, dev->base + RXDP); for (i=0; i<NR_RX_DESC; i++) { struct sk_buff *skb = dev->rx_info.skbs[i]; dev->rx_info.skbs[i] = NULL; clear_rx_desc(dev, i); kfree_skb(skb); } } static void ns83820_rx_kick(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); /*if (nr_rx_empty(dev) >= NR_RX_DESC/4)*/ { if (dev->rx_info.up) { rx_refill_atomic(ndev); kick_rx(ndev); } } if (dev->rx_info.up && nr_rx_empty(dev) > NR_RX_DESC*3/4) schedule_work(&dev->tq_refill); else kick_rx(ndev); if (dev->rx_info.idle) printk(KERN_DEBUG "%s: BAD\n", ndev->name); } /* rx_irq * */ static void rx_irq(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); struct rx_info *info = &dev->rx_info; unsigned next_rx; int rx_rc, len; u32 cmdsts; __le32 *desc; unsigned long flags; int nr = 0; dprintk("rx_irq(%p)\n", ndev); dprintk("rxdp: %08x, descs: %08lx next_rx[%d]: %p next_empty[%d]: %p\n", readl(dev->base + RXDP), (long)(dev->rx_info.phy_descs), (int)dev->rx_info.next_rx, (dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_rx)), (int)dev->rx_info.next_empty, (dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_empty)) ); spin_lock_irqsave(&info->lock, flags); if (!info->up) goto out; dprintk("walking descs\n"); next_rx = info->next_rx; desc = info->next_rx_desc; while ((CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[DESC_CMDSTS]))) && (cmdsts != CMDSTS_OWN)) { struct sk_buff *skb; u32 extsts = le32_to_cpu(desc[DESC_EXTSTS]); dma_addr_t bufptr = desc_addr_get(desc + DESC_BUFPTR); dprintk("cmdsts: %08x\n", cmdsts); dprintk("link: %08x\n", cpu_to_le32(desc[DESC_LINK])); dprintk("extsts: %08x\n", extsts); skb = info->skbs[next_rx]; info->skbs[next_rx] = NULL; info->next_rx = (next_rx + 1) % NR_RX_DESC; mb(); clear_rx_desc(dev, next_rx); dma_unmap_single(&dev->pci_dev->dev, bufptr, RX_BUF_SIZE, DMA_FROM_DEVICE); len = cmdsts & CMDSTS_LEN_MASK; #ifdef NS83820_VLAN_ACCEL_SUPPORT /* NH: As was mentioned below, this chip is kinda * brain dead about vlan tag stripping. Frames * that are 64 bytes with a vlan header appended * like arp frames, or pings, are flagged as Runts * when the tag is stripped and hardware. This * also means that the OK bit in the descriptor * is cleared when the frame comes in so we have * to do a specific length check here to make sure * the frame would have been ok, had we not stripped * the tag. 
*/ if (likely((CMDSTS_OK & cmdsts) || ((cmdsts & CMDSTS_RUNT) && len >= 56))) { #else if (likely(CMDSTS_OK & cmdsts)) { #endif skb_put(skb, len); if (unlikely(!skb)) goto netdev_mangle_me_harder_failed; if (cmdsts & CMDSTS_DEST_MULTI) ndev->stats.multicast++; ndev->stats.rx_packets++; ndev->stats.rx_bytes += len; if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) { skb->ip_summed = CHECKSUM_UNNECESSARY; } else { skb_checksum_none_assert(skb); } skb->protocol = eth_type_trans(skb, ndev); #ifdef NS83820_VLAN_ACCEL_SUPPORT if(extsts & EXTSTS_VPKT) { unsigned short tag; tag = ntohs(extsts & EXTSTS_VTG_MASK); __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag); } #endif rx_rc = netif_rx(skb); if (NET_RX_DROP == rx_rc) { netdev_mangle_me_harder_failed: ndev->stats.rx_dropped++; } } else { dev_kfree_skb_irq(skb); } nr++; next_rx = info->next_rx; desc = info->descs + (DESC_SIZE * next_rx); } info->next_rx = next_rx; info->next_rx_desc = info->descs + (DESC_SIZE * next_rx); out: if (0 && !nr) { Dprintk("dazed: cmdsts_f: %08x\n", cmdsts); } spin_unlock_irqrestore(&info->lock, flags); } static void rx_action(struct tasklet_struct *t) { struct ns83820 *dev = from_tasklet(dev, t, rx_tasklet); struct net_device *ndev = dev->ndev; rx_irq(ndev); writel(ihr, dev->base + IHR); spin_lock_irq(&dev->misc_lock); dev->IMR_cache |= ISR_RXDESC; writel(dev->IMR_cache, dev->base + IMR); spin_unlock_irq(&dev->misc_lock); rx_irq(ndev); ns83820_rx_kick(ndev); } /* Packet Transmit code */ static inline void kick_tx(struct ns83820 *dev) { dprintk("kick_tx(%p): tx_idx=%d free_idx=%d\n", dev, dev->tx_idx, dev->tx_free_idx); writel(CR_TXE, dev->base + CR); } /* No spinlock needed on the transmit irq path as the interrupt handler is * serialized. */ static void do_tx_done(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); u32 cmdsts, tx_done_idx; __le32 *desc; dprintk("do_tx_done(%p)\n", ndev); tx_done_idx = dev->tx_done_idx; desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n", tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS])); while ((tx_done_idx != dev->tx_free_idx) && !(CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[DESC_CMDSTS]))) ) { struct sk_buff *skb; unsigned len; dma_addr_t addr; if (cmdsts & CMDSTS_ERR) ndev->stats.tx_errors++; if (cmdsts & CMDSTS_OK) ndev->stats.tx_packets++; if (cmdsts & CMDSTS_OK) ndev->stats.tx_bytes += cmdsts & 0xffff; dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n", tx_done_idx, dev->tx_free_idx, cmdsts); skb = dev->tx_skbs[tx_done_idx]; dev->tx_skbs[tx_done_idx] = NULL; dprintk("done(%p)\n", skb); len = cmdsts & CMDSTS_LEN_MASK; addr = desc_addr_get(desc + DESC_BUFPTR); if (skb) { dma_unmap_single(&dev->pci_dev->dev, addr, len, DMA_TO_DEVICE); dev_consume_skb_irq(skb); atomic_dec(&dev->nr_tx_skbs); } else dma_unmap_page(&dev->pci_dev->dev, addr, len, DMA_TO_DEVICE); tx_done_idx = (tx_done_idx + 1) % NR_TX_DESC; dev->tx_done_idx = tx_done_idx; desc[DESC_CMDSTS] = cpu_to_le32(0); mb(); desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); } /* Allow network stack to resume queueing packets after we've * finished transmitting at least 1/4 of the packets in the queue. 
*/ if (netif_queue_stopped(ndev) && start_tx_okay(dev)) { dprintk("start_queue(%p)\n", ndev); netif_start_queue(ndev); netif_wake_queue(ndev); } } static void ns83820_cleanup_tx(struct ns83820 *dev) { unsigned i; for (i=0; i<NR_TX_DESC; i++) { struct sk_buff *skb = dev->tx_skbs[i]; dev->tx_skbs[i] = NULL; if (skb) { __le32 *desc = dev->tx_descs + (i * DESC_SIZE); dma_unmap_single(&dev->pci_dev->dev, desc_addr_get(desc + DESC_BUFPTR), le32_to_cpu(desc[DESC_CMDSTS]) & CMDSTS_LEN_MASK, DMA_TO_DEVICE); dev_kfree_skb_irq(skb); atomic_dec(&dev->nr_tx_skbs); } } memset(dev->tx_descs, 0, NR_TX_DESC * DESC_SIZE * 4); } /* transmit routine. This code relies on the network layer serializing * its calls in, but will run happily in parallel with the interrupt * handler. This code currently has provisions for fragmenting tx buffers * while trying to track down a bug in either the zero copy code or * the tx fifo (hence the MAX_FRAG_LEN). */ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); u32 free_idx, cmdsts, extsts; int nr_free, nr_frags; unsigned tx_done_idx, last_idx; dma_addr_t buf; unsigned len; skb_frag_t *frag; int stopped = 0; int do_intr = 0; volatile __le32 *first_desc; dprintk("ns83820_hard_start_xmit\n"); nr_frags = skb_shinfo(skb)->nr_frags; again: if (unlikely(dev->CFG_cache & CFG_LNKSTS)) { netif_stop_queue(ndev); if (unlikely(dev->CFG_cache & CFG_LNKSTS)) return NETDEV_TX_BUSY; netif_start_queue(ndev); } last_idx = free_idx = dev->tx_free_idx; tx_done_idx = dev->tx_done_idx; nr_free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC; nr_free -= 1; if (nr_free <= nr_frags) { dprintk("stop_queue - not enough(%p)\n", ndev); netif_stop_queue(ndev); /* Check again: we may have raced with a tx done irq */ if (dev->tx_done_idx != tx_done_idx) { dprintk("restart queue(%p)\n", ndev); netif_start_queue(ndev); goto again; } return NETDEV_TX_BUSY; } if (free_idx == dev->tx_intr_idx) { do_intr = 1; dev->tx_intr_idx = (dev->tx_intr_idx + NR_TX_DESC/4) % NR_TX_DESC; } nr_free -= nr_frags; if (nr_free < MIN_TX_DESC_FREE) { dprintk("stop_queue - last entry(%p)\n", ndev); netif_stop_queue(ndev); stopped = 1; } frag = skb_shinfo(skb)->frags; if (!nr_frags) frag = NULL; extsts = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { extsts |= EXTSTS_IPPKT; if (IPPROTO_TCP == ip_hdr(skb)->protocol) extsts |= EXTSTS_TCPPKT; else if (IPPROTO_UDP == ip_hdr(skb)->protocol) extsts |= EXTSTS_UDPPKT; } #ifdef NS83820_VLAN_ACCEL_SUPPORT if (skb_vlan_tag_present(skb)) { /* fetch the vlan tag info out of the * ancillary data if the vlan code * is using hw vlan acceleration */ short tag = skb_vlan_tag_get(skb); extsts |= (EXTSTS_VPKT | htons(tag)); } #endif len = skb->len; if (nr_frags) len -= skb->data_len; buf = dma_map_single(&dev->pci_dev->dev, skb->data, len, DMA_TO_DEVICE); first_desc = dev->tx_descs + (free_idx * DESC_SIZE); for (;;) { volatile __le32 *desc = dev->tx_descs + (free_idx * DESC_SIZE); dprintk("frag[%3u]: %4u @ 0x%08Lx\n", free_idx, len, (unsigned long long)buf); last_idx = free_idx; free_idx = (free_idx + 1) % NR_TX_DESC; desc[DESC_LINK] = cpu_to_le32(dev->tx_phy_descs + (free_idx * DESC_SIZE * 4)); desc_addr_set(desc + DESC_BUFPTR, buf); desc[DESC_EXTSTS] = cpu_to_le32(extsts); cmdsts = ((nr_frags) ? CMDSTS_MORE : do_intr ? CMDSTS_INTR : 0); cmdsts |= (desc == first_desc) ? 
0 : CMDSTS_OWN; cmdsts |= len; desc[DESC_CMDSTS] = cpu_to_le32(cmdsts); if (!nr_frags) break; buf = skb_frag_dma_map(&dev->pci_dev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n", (long long)buf, (long) page_to_pfn(frag->page), frag->page_offset); len = skb_frag_size(frag); frag++; nr_frags--; } dprintk("done pkt\n"); spin_lock_irq(&dev->tx_lock); dev->tx_skbs[last_idx] = skb; first_desc[DESC_CMDSTS] |= cpu_to_le32(CMDSTS_OWN); dev->tx_free_idx = free_idx; atomic_inc(&dev->nr_tx_skbs); spin_unlock_irq(&dev->tx_lock); kick_tx(dev); /* Check again: we may have raced with a tx done irq */ if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev)) netif_start_queue(ndev); return NETDEV_TX_OK; } static void ns83820_update_stats(struct ns83820 *dev) { struct net_device *ndev = dev->ndev; u8 __iomem *base = dev->base; /* the DP83820 will freeze counters, so we need to read all of them */ ndev->stats.rx_errors += readl(base + 0x60) & 0xffff; ndev->stats.rx_crc_errors += readl(base + 0x64) & 0xffff; ndev->stats.rx_missed_errors += readl(base + 0x68) & 0xffff; ndev->stats.rx_frame_errors += readl(base + 0x6c) & 0xffff; /*ndev->stats.rx_symbol_errors +=*/ readl(base + 0x70); ndev->stats.rx_length_errors += readl(base + 0x74) & 0xffff; ndev->stats.rx_length_errors += readl(base + 0x78) & 0xffff; /*ndev->stats.rx_badopcode_errors += */ readl(base + 0x7c); /*ndev->stats.rx_pause_count += */ readl(base + 0x80); /*ndev->stats.tx_pause_count += */ readl(base + 0x84); ndev->stats.tx_carrier_errors += readl(base + 0x88) & 0xff; } static struct net_device_stats *ns83820_get_stats(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); /* somewhat overkill */ spin_lock_irq(&dev->misc_lock); ns83820_update_stats(dev); spin_unlock_irq(&dev->misc_lock); return &ndev->stats; } /* Let ethtool retrieve info */ static int ns83820_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *cmd) { struct ns83820 *dev = PRIV(ndev); u32 cfg, tbicr; int fullduplex = 0; u32 supported; /* * Here's the list of available ethtool commands from other drivers: * cmd->advertising = * ethtool_cmd_speed_set(cmd, ...) * cmd->duplex = * cmd->port = 0; * cmd->phy_address = * cmd->transceiver = 0; * cmd->autoneg = * cmd->maxtxpkt = 0; * cmd->maxrxpkt = 0; */ /* read current configuration */ cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY; readl(dev->base + TANAR); tbicr = readl(dev->base + TBICR); fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0; supported = SUPPORTED_Autoneg; if (dev->CFG_cache & CFG_TBI_EN) { /* we have optical interface */ supported |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE; cmd->base.port = PORT_FIBRE; } else { /* we have copper */ supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_MII; cmd->base.port = PORT_MII; } ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); cmd->base.duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF; switch (cfg / CFG_SPDSTS0 & 3) { case 2: cmd->base.speed = SPEED_1000; break; case 1: cmd->base.speed = SPEED_100; break; default: cmd->base.speed = SPEED_10; break; } cmd->base.autoneg = (tbicr & TBICR_MR_AN_ENABLE) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; return 0; } /* Let ethool change settings*/ static int ns83820_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd) { struct ns83820 *dev = PRIV(ndev); u32 cfg, tanar; int have_optical = 0; int fullduplex = 0; /* read current configuration */ cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY; tanar = readl(dev->base + TANAR); if (dev->CFG_cache & CFG_TBI_EN) { /* we have optical */ have_optical = 1; fullduplex = (tanar & TANAR_FULL_DUP); } else { /* we have copper */ fullduplex = cfg & CFG_DUPSTS; } spin_lock_irq(&dev->misc_lock); spin_lock(&dev->tx_lock); /* Set duplex */ if (cmd->base.duplex != fullduplex) { if (have_optical) { /*set full duplex*/ if (cmd->base.duplex == DUPLEX_FULL) { /* force full duplex */ writel(readl(dev->base + TXCFG) | TXCFG_CSI | TXCFG_HBI | TXCFG_ATP, dev->base + TXCFG); writel(readl(dev->base + RXCFG) | RXCFG_RX_FD, dev->base + RXCFG); /* Light up full duplex LED */ writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT, dev->base + GPIOR); } else { /*TODO: set half duplex */ } } else { /*we have copper*/ /* TODO: Set duplex for copper cards */ } printk(KERN_INFO "%s: Duplex set via ethtool\n", ndev->name); } /* Set autonegotiation */ if (1) { if (cmd->base.autoneg == AUTONEG_ENABLE) { /* restart auto negotiation */ writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN, dev->base + TBICR); writel(TBICR_MR_AN_ENABLE, dev->base + TBICR); dev->linkstate = LINK_AUTONEGOTIATE; printk(KERN_INFO "%s: autoneg enabled via ethtool\n", ndev->name); } else { /* disable auto negotiation */ writel(0x00000000, dev->base + TBICR); } printk(KERN_INFO "%s: autoneg %s via ethtool\n", ndev->name, cmd->base.autoneg ? "ENABLED" : "DISABLED"); } phy_intr(ndev); spin_unlock(&dev->tx_lock); spin_unlock_irq(&dev->misc_lock); return 0; } /* end ethtool get/set support -df */ static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { struct ns83820 *dev = PRIV(ndev); strscpy(info->driver, "ns83820", sizeof(info->driver)); strscpy(info->version, VERSION, sizeof(info->version)); strscpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info)); } static u32 ns83820_get_link(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); u32 cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY; return cfg & CFG_LNKSTS ? 
1 : 0; } static const struct ethtool_ops ops = { .get_drvinfo = ns83820_get_drvinfo, .get_link = ns83820_get_link, .get_link_ksettings = ns83820_get_link_ksettings, .set_link_ksettings = ns83820_set_link_ksettings, }; static inline void ns83820_disable_interrupts(struct ns83820 *dev) { writel(0, dev->base + IMR); writel(0, dev->base + IER); readl(dev->base + IER); } /* this function is called in irq context from the ISR */ static void ns83820_mib_isr(struct ns83820 *dev) { unsigned long flags; spin_lock_irqsave(&dev->misc_lock, flags); ns83820_update_stats(dev); spin_unlock_irqrestore(&dev->misc_lock, flags); } static void ns83820_do_isr(struct net_device *ndev, u32 isr); static irqreturn_t ns83820_irq(int foo, void *data) { struct net_device *ndev = data; struct ns83820 *dev = PRIV(ndev); u32 isr; dprintk("ns83820_irq(%p)\n", ndev); dev->ihr = 0; isr = readl(dev->base + ISR); dprintk("irq: %08x\n", isr); ns83820_do_isr(ndev, isr); return IRQ_HANDLED; } static void ns83820_do_isr(struct net_device *ndev, u32 isr) { struct ns83820 *dev = PRIV(ndev); unsigned long flags; #ifdef DEBUG if (isr & ~(ISR_PHY | ISR_RXDESC | ISR_RXEARLY | ISR_RXOK | ISR_RXERR | ISR_TXIDLE | ISR_TXOK | ISR_TXDESC)) Dprintk("odd isr? 0x%08x\n", isr); #endif if (ISR_RXIDLE & isr) { dev->rx_info.idle = 1; Dprintk("oh dear, we are idle\n"); ns83820_rx_kick(ndev); } if ((ISR_RXDESC | ISR_RXOK) & isr) { prefetch(dev->rx_info.next_rx_desc); spin_lock_irqsave(&dev->misc_lock, flags); dev->IMR_cache &= ~(ISR_RXDESC | ISR_RXOK); writel(dev->IMR_cache, dev->base + IMR); spin_unlock_irqrestore(&dev->misc_lock, flags); tasklet_schedule(&dev->rx_tasklet); //rx_irq(ndev); //writel(4, dev->base + IHR); } if ((ISR_RXIDLE | ISR_RXORN | ISR_RXDESC | ISR_RXOK | ISR_RXERR) & isr) ns83820_rx_kick(ndev); if (unlikely(ISR_RXSOVR & isr)) { //printk("overrun: rxsovr\n"); ndev->stats.rx_fifo_errors++; } if (unlikely(ISR_RXORN & isr)) { //printk("overrun: rxorn\n"); ndev->stats.rx_fifo_errors++; } if ((ISR_RXRCMP & isr) && dev->rx_info.up) writel(CR_RXE, dev->base + CR); if (ISR_TXIDLE & isr) { u32 txdp; txdp = readl(dev->base + TXDP); dprintk("txdp: %08x\n", txdp); txdp -= dev->tx_phy_descs; dev->tx_idx = txdp / (DESC_SIZE * 4); if (dev->tx_idx >= NR_TX_DESC) { printk(KERN_ALERT "%s: BUG -- txdp out of range\n", ndev->name); dev->tx_idx = 0; } /* The may have been a race between a pci originated read * and the descriptor update from the cpu. Just in case, * kick the transmitter if the hardware thinks it is on a * different descriptor than we are. */ if (dev->tx_idx != dev->tx_free_idx) kick_tx(dev); } /* Defer tx ring processing until more than a minimum amount of * work has accumulated */ if ((ISR_TXDESC | ISR_TXIDLE | ISR_TXOK | ISR_TXERR) & isr) { spin_lock_irqsave(&dev->tx_lock, flags); do_tx_done(ndev); spin_unlock_irqrestore(&dev->tx_lock, flags); /* Disable TxOk if there are no outstanding tx packets. */ if ((dev->tx_done_idx == dev->tx_free_idx) && (dev->IMR_cache & ISR_TXOK)) { spin_lock_irqsave(&dev->misc_lock, flags); dev->IMR_cache &= ~ISR_TXOK; writel(dev->IMR_cache, dev->base + IMR); spin_unlock_irqrestore(&dev->misc_lock, flags); } } /* The TxIdle interrupt can come in before the transmit has * completed. Normally we reap packets off of the combination * of TxDesc and TxIdle and leave TxOk disabled (since it * occurs on every packet), but when no further irqs of this * nature are expected, we must enable TxOk. 
*/ if ((ISR_TXIDLE & isr) && (dev->tx_done_idx != dev->tx_free_idx)) { spin_lock_irqsave(&dev->misc_lock, flags); dev->IMR_cache |= ISR_TXOK; writel(dev->IMR_cache, dev->base + IMR); spin_unlock_irqrestore(&dev->misc_lock, flags); } /* MIB interrupt: one of the statistics counters is about to overflow */ if (unlikely(ISR_MIB & isr)) ns83820_mib_isr(dev); /* PHY: Link up/down/negotiation state change */ if (unlikely(ISR_PHY & isr)) phy_intr(ndev); #if 0 /* Still working on the interrupt mitigation strategy */ if (dev->ihr) writel(dev->ihr, dev->base + IHR); #endif } static void ns83820_do_reset(struct ns83820 *dev, u32 which) { Dprintk("resetting chip...\n"); writel(which, dev->base + CR); do { schedule(); } while (readl(dev->base + CR) & which); Dprintk("okay!\n"); } static int ns83820_stop(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); /* FIXME: protect against interrupt handler? */ del_timer_sync(&dev->tx_watchdog); ns83820_disable_interrupts(dev); dev->rx_info.up = 0; synchronize_irq(dev->pci_dev->irq); ns83820_do_reset(dev, CR_RST); synchronize_irq(dev->pci_dev->irq); spin_lock_irq(&dev->misc_lock); dev->IMR_cache &= ~(ISR_TXURN | ISR_TXIDLE | ISR_TXERR | ISR_TXDESC | ISR_TXOK); spin_unlock_irq(&dev->misc_lock); ns83820_cleanup_rx(dev); ns83820_cleanup_tx(dev); return 0; } static void ns83820_tx_timeout(struct net_device *ndev, unsigned int txqueue) { struct ns83820 *dev = PRIV(ndev); u32 tx_done_idx; __le32 *desc; unsigned long flags; spin_lock_irqsave(&dev->tx_lock, flags); tx_done_idx = dev->tx_done_idx; desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); printk(KERN_INFO "%s: tx_timeout: tx_done_idx=%d free_idx=%d cmdsts=%08x\n", ndev->name, tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS])); #if defined(DEBUG) { u32 isr; isr = readl(dev->base + ISR); printk("irq: %08x imr: %08x\n", isr, dev->IMR_cache); ns83820_do_isr(ndev, isr); } #endif do_tx_done(ndev); tx_done_idx = dev->tx_done_idx; desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); printk(KERN_INFO "%s: after: tx_done_idx=%d free_idx=%d cmdsts=%08x\n", ndev->name, tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS])); spin_unlock_irqrestore(&dev->tx_lock, flags); } static void ns83820_tx_watch(struct timer_list *t) { struct ns83820 *dev = from_timer(dev, t, tx_watchdog); struct net_device *ndev = dev->ndev; #if defined(DEBUG) printk("ns83820_tx_watch: %u %u %d\n", dev->tx_done_idx, dev->tx_free_idx, atomic_read(&dev->nr_tx_skbs) ); #endif if (time_after(jiffies, dev_trans_start(ndev) + 1*HZ) && dev->tx_done_idx != dev->tx_free_idx) { printk(KERN_DEBUG "%s: ns83820_tx_watch: %u %u %d\n", ndev->name, dev->tx_done_idx, dev->tx_free_idx, atomic_read(&dev->nr_tx_skbs)); ns83820_tx_timeout(ndev, UINT_MAX); } mod_timer(&dev->tx_watchdog, jiffies + 2*HZ); } static int ns83820_open(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); unsigned i; u32 desc; int ret; dprintk("ns83820_open\n"); writel(0, dev->base + PQCR); ret = ns83820_setup_rx(ndev); if (ret) goto failed; memset(dev->tx_descs, 0, 4 * NR_TX_DESC * DESC_SIZE); for (i=0; i<NR_TX_DESC; i++) { dev->tx_descs[(i * DESC_SIZE) + DESC_LINK] = cpu_to_le32( dev->tx_phy_descs + ((i+1) % NR_TX_DESC) * DESC_SIZE * 4); } dev->tx_idx = 0; dev->tx_done_idx = 0; desc = dev->tx_phy_descs; writel(0, dev->base + TXDP_HI); writel(desc, dev->base + TXDP); timer_setup(&dev->tx_watchdog, ns83820_tx_watch, 0); mod_timer(&dev->tx_watchdog, jiffies + 2*HZ); netif_start_queue(ndev); /* FIXME: wait for phy to come up */ return 0; failed: ns83820_stop(ndev); 
return ret; } static void ns83820_getmac(struct ns83820 *dev, struct net_device *ndev) { u8 mac[ETH_ALEN]; unsigned i; for (i=0; i<3; i++) { u32 data; /* Read from the perfect match memory: this is loaded by * the chip from the EEPROM via the EELOAD self test. */ writel(i*2, dev->base + RFCR); data = readl(dev->base + RFDR); mac[i * 2] = data; mac[i * 2 + 1] = data >> 8; } eth_hw_addr_set(ndev, mac); } static void ns83820_set_multicast(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); u8 __iomem *rfcr = dev->base + RFCR; u32 and_mask = 0xffffffff; u32 or_mask = 0; u32 val; if (ndev->flags & IFF_PROMISC) or_mask |= RFCR_AAU | RFCR_AAM; else and_mask &= ~(RFCR_AAU | RFCR_AAM); if (ndev->flags & IFF_ALLMULTI || netdev_mc_count(ndev)) or_mask |= RFCR_AAM; else and_mask &= ~RFCR_AAM; spin_lock_irq(&dev->misc_lock); val = (readl(rfcr) & and_mask) | or_mask; /* Ramit : RFCR Write Fix doc says RFEN must be 0 modify other bits */ writel(val & ~RFCR_RFEN, rfcr); writel(val, rfcr); spin_unlock_irq(&dev->misc_lock); } static void ns83820_run_bist(struct net_device *ndev, const char *name, u32 enable, u32 done, u32 fail) { struct ns83820 *dev = PRIV(ndev); int timed_out = 0; unsigned long start; u32 status; int loops = 0; dprintk("%s: start %s\n", ndev->name, name); start = jiffies; writel(enable, dev->base + PTSCR); for (;;) { loops++; status = readl(dev->base + PTSCR); if (!(status & enable)) break; if (status & done) break; if (status & fail) break; if (time_after_eq(jiffies, start + HZ)) { timed_out = 1; break; } schedule_timeout_uninterruptible(1); } if (status & fail) printk(KERN_INFO "%s: %s failed! (0x%08x & 0x%08x)\n", ndev->name, name, status, fail); else if (timed_out) printk(KERN_INFO "%s: run_bist %s timed out! (%08x)\n", ndev->name, name, status); dprintk("%s: done %s in %d loops\n", ndev->name, name, loops); } #ifdef PHY_CODE_IS_FINISHED static void ns83820_mii_write_bit(struct ns83820 *dev, int bit) { /* drive MDC low */ dev->MEAR_cache &= ~MEAR_MDC; writel(dev->MEAR_cache, dev->base + MEAR); readl(dev->base + MEAR); /* enable output, set bit */ dev->MEAR_cache |= MEAR_MDDIR; if (bit) dev->MEAR_cache |= MEAR_MDIO; else dev->MEAR_cache &= ~MEAR_MDIO; /* set the output bit */ writel(dev->MEAR_cache, dev->base + MEAR); readl(dev->base + MEAR); /* Wait. Max clock rate is 2.5MHz, this way we come in under 1MHz */ udelay(1); /* drive MDC high causing the data bit to be latched */ dev->MEAR_cache |= MEAR_MDC; writel(dev->MEAR_cache, dev->base + MEAR); readl(dev->base + MEAR); /* Wait again... */ udelay(1); } static int ns83820_mii_read_bit(struct ns83820 *dev) { int bit; /* drive MDC low, disable output */ dev->MEAR_cache &= ~MEAR_MDC; dev->MEAR_cache &= ~MEAR_MDDIR; writel(dev->MEAR_cache, dev->base + MEAR); readl(dev->base + MEAR); /* Wait. Max clock rate is 2.5MHz, this way we come in under 1MHz */ udelay(1); /* drive MDC high causing the data bit to be latched */ bit = (readl(dev->base + MEAR) & MEAR_MDIO) ? 1 : 0; dev->MEAR_cache |= MEAR_MDC; writel(dev->MEAR_cache, dev->base + MEAR); /* Wait again... 
*/ udelay(1); return bit; } static unsigned ns83820_mii_read_reg(struct ns83820 *dev, unsigned phy, unsigned reg) { unsigned data = 0; int i; /* read some garbage so that we eventually sync up */ for (i=0; i<64; i++) ns83820_mii_read_bit(dev); ns83820_mii_write_bit(dev, 0); /* start */ ns83820_mii_write_bit(dev, 1); ns83820_mii_write_bit(dev, 1); /* opcode read */ ns83820_mii_write_bit(dev, 0); /* write out the phy address: 5 bits, msb first */ for (i=0; i<5; i++) ns83820_mii_write_bit(dev, phy & (0x10 >> i)); /* write out the register address, 5 bits, msb first */ for (i=0; i<5; i++) ns83820_mii_write_bit(dev, reg & (0x10 >> i)); ns83820_mii_read_bit(dev); /* turn around cycles */ ns83820_mii_read_bit(dev); /* read in the register data, 16 bits msb first */ for (i=0; i<16; i++) { data <<= 1; data |= ns83820_mii_read_bit(dev); } return data; } static unsigned ns83820_mii_write_reg(struct ns83820 *dev, unsigned phy, unsigned reg, unsigned data) { int i; /* read some garbage so that we eventually sync up */ for (i=0; i<64; i++) ns83820_mii_read_bit(dev); ns83820_mii_write_bit(dev, 0); /* start */ ns83820_mii_write_bit(dev, 1); ns83820_mii_write_bit(dev, 0); /* opcode read */ ns83820_mii_write_bit(dev, 1); /* write out the phy address: 5 bits, msb first */ for (i=0; i<5; i++) ns83820_mii_write_bit(dev, phy & (0x10 >> i)); /* write out the register address, 5 bits, msb first */ for (i=0; i<5; i++) ns83820_mii_write_bit(dev, reg & (0x10 >> i)); ns83820_mii_read_bit(dev); /* turn around cycles */ ns83820_mii_read_bit(dev); /* read in the register data, 16 bits msb first */ for (i=0; i<16; i++) ns83820_mii_write_bit(dev, (data >> (15 - i)) & 1); return data; } static void ns83820_probe_phy(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); int j; unsigned a, b; for (j = 0; j < 0x16; j += 4) { dprintk("%s: [0x%02x] %04x %04x %04x %04x\n", ndev->name, j, ns83820_mii_read_reg(dev, 1, 0 + j), ns83820_mii_read_reg(dev, 1, 1 + j), ns83820_mii_read_reg(dev, 1, 2 + j), ns83820_mii_read_reg(dev, 1, 3 + j) ); } /* read firmware version: memory addr is 0x8402 and 0x8403 */ ns83820_mii_write_reg(dev, 1, 0x16, 0x000d); ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e); a = ns83820_mii_read_reg(dev, 1, 0x1d); ns83820_mii_write_reg(dev, 1, 0x16, 0x000d); ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e); b = ns83820_mii_read_reg(dev, 1, 0x1d); dprintk("version: 0x%04x 0x%04x\n", a, b); } #endif static const struct net_device_ops netdev_ops = { .ndo_open = ns83820_open, .ndo_stop = ns83820_stop, .ndo_start_xmit = ns83820_hard_start_xmit, .ndo_get_stats = ns83820_get_stats, .ndo_set_rx_mode = ns83820_set_multicast, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_tx_timeout = ns83820_tx_timeout, }; static int ns83820_init_one(struct pci_dev *pci_dev, const struct pci_device_id *id) { struct net_device *ndev; struct ns83820 *dev; long addr; int err; int using_dac = 0; /* See if we can set the dma mask early on; failure is fatal. 
*/ if (sizeof(dma_addr_t) == 8 && !dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64))) { using_dac = 1; } else if (!dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) { using_dac = 0; } else { dev_warn(&pci_dev->dev, "dma_set_mask failed!\n"); return -ENODEV; } ndev = alloc_etherdev(sizeof(struct ns83820)); err = -ENOMEM; if (!ndev) goto out; dev = PRIV(ndev); dev->ndev = ndev; spin_lock_init(&dev->rx_info.lock); spin_lock_init(&dev->tx_lock); spin_lock_init(&dev->misc_lock); dev->pci_dev = pci_dev; SET_NETDEV_DEV(ndev, &pci_dev->dev); INIT_WORK(&dev->tq_refill, queue_refill); tasklet_setup(&dev->rx_tasklet, rx_action); err = pci_enable_device(pci_dev); if (err) { dev_info(&pci_dev->dev, "pci_enable_dev failed: %d\n", err); goto out_free; } pci_set_master(pci_dev); addr = pci_resource_start(pci_dev, 1); dev->base = ioremap(addr, PAGE_SIZE); dev->tx_descs = dma_alloc_coherent(&pci_dev->dev, 4 * DESC_SIZE * NR_TX_DESC, &dev->tx_phy_descs, GFP_KERNEL); dev->rx_info.descs = dma_alloc_coherent(&pci_dev->dev, 4 * DESC_SIZE * NR_RX_DESC, &dev->rx_info.phy_descs, GFP_KERNEL); err = -ENOMEM; if (!dev->base || !dev->tx_descs || !dev->rx_info.descs) goto out_disable; dprintk("%p: %08lx %p: %08lx\n", dev->tx_descs, (long)dev->tx_phy_descs, dev->rx_info.descs, (long)dev->rx_info.phy_descs); ns83820_disable_interrupts(dev); dev->IMR_cache = 0; err = request_irq(pci_dev->irq, ns83820_irq, IRQF_SHARED, DRV_NAME, ndev); if (err) { dev_info(&pci_dev->dev, "unable to register irq %d, err %d\n", pci_dev->irq, err); goto out_disable; } /* * FIXME: we are holding rtnl_lock() over obscenely long area only * because some of the setup code uses dev->name. It's Wrong(tm) - * we should be using driver-specific names for all that stuff. * For now that will do, but we really need to come back and kill * most of the dev_alloc_name() users later. */ rtnl_lock(); err = dev_alloc_name(ndev, ndev->name); if (err < 0) { dev_info(&pci_dev->dev, "unable to get netdev name: %d\n", err); goto out_free_irq; } printk("%s: ns83820.c: 0x22c: %08x, subsystem: %04x:%04x\n", ndev->name, le32_to_cpu(readl(dev->base + 0x22c)), pci_dev->subsystem_vendor, pci_dev->subsystem_device); ndev->netdev_ops = &netdev_ops; ndev->ethtool_ops = &ops; ndev->watchdog_timeo = 5 * HZ; pci_set_drvdata(pci_dev, ndev); ns83820_do_reset(dev, CR_RST); /* Must reset the ram bist before running it */ writel(PTSCR_RBIST_RST, dev->base + PTSCR); ns83820_run_bist(ndev, "sram bist", PTSCR_RBIST_EN, PTSCR_RBIST_DONE, PTSCR_RBIST_FAIL); ns83820_run_bist(ndev, "eeprom bist", PTSCR_EEBIST_EN, 0, PTSCR_EEBIST_FAIL); ns83820_run_bist(ndev, "eeprom load", PTSCR_EELOAD_EN, 0, 0); /* I love config registers */ dev->CFG_cache = readl(dev->base + CFG); if ((dev->CFG_cache & CFG_PCI64_DET)) { printk(KERN_INFO "%s: detected 64 bit PCI data bus.\n", ndev->name); /*dev->CFG_cache |= CFG_DATA64_EN;*/ if (!(dev->CFG_cache & CFG_DATA64_EN)) printk(KERN_INFO "%s: EEPROM did not enable 64 bit bus. Disabled.\n", ndev->name); } else dev->CFG_cache &= ~(CFG_DATA64_EN); dev->CFG_cache &= (CFG_TBI_EN | CFG_MRM_DIS | CFG_MWI_DIS | CFG_T64ADDR | CFG_DATA64_EN | CFG_EXT_125 | CFG_M64ADDR); dev->CFG_cache |= CFG_PINT_DUPSTS | CFG_PINT_LNKSTS | CFG_PINT_SPDSTS | CFG_EXTSTS_EN | CFG_EXD | CFG_PESEL; dev->CFG_cache |= CFG_REQALG; dev->CFG_cache |= CFG_POW; dev->CFG_cache |= CFG_TMRTEST; /* When compiled with 64 bit addressing, we must always enable * the 64 bit descriptor format. 
*/ if (sizeof(dma_addr_t) == 8) dev->CFG_cache |= CFG_M64ADDR; if (using_dac) dev->CFG_cache |= CFG_T64ADDR; /* Big endian mode does not seem to do what the docs suggest */ dev->CFG_cache &= ~CFG_BEM; /* setup optical transceiver if we have one */ if (dev->CFG_cache & CFG_TBI_EN) { printk(KERN_INFO "%s: enabling optical transceiver\n", ndev->name); writel(readl(dev->base + GPIOR) | 0x3e8, dev->base + GPIOR); /* setup auto negotiation feature advertisement */ writel(readl(dev->base + TANAR) | TANAR_HALF_DUP | TANAR_FULL_DUP, dev->base + TANAR); /* start auto negotiation */ writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN, dev->base + TBICR); writel(TBICR_MR_AN_ENABLE, dev->base + TBICR); dev->linkstate = LINK_AUTONEGOTIATE; dev->CFG_cache |= CFG_MODE_1000; } writel(dev->CFG_cache, dev->base + CFG); dprintk("CFG: %08x\n", dev->CFG_cache); if (reset_phy) { printk(KERN_INFO "%s: resetting phy\n", ndev->name); writel(dev->CFG_cache | CFG_PHY_RST, dev->base + CFG); msleep(10); writel(dev->CFG_cache, dev->base + CFG); } #if 0 /* Huh? This sets the PCI latency register. Should be done via * the PCI layer. FIXME. */ if (readl(dev->base + SRR)) writel(readl(dev->base+0x20c) | 0xfe00, dev->base + 0x20c); #endif /* Note! The DMA burst size interacts with packet * transmission, such that the largest packet that * can be transmitted is 8192 - FLTH - burst size. * If only the transmit fifo was larger... */ /* Ramit : 1024 DMA is not a good idea, it ends up banging * some DELL and COMPAQ SMP systems */ writel(TXCFG_CSI | TXCFG_HBI | TXCFG_ATP | TXCFG_MXDMA512 | ((1600 / 32) * 0x100), dev->base + TXCFG); /* Flush the interrupt holdoff timer */ writel(0x000, dev->base + IHR); writel(0x100, dev->base + IHR); writel(0x000, dev->base + IHR); /* Set Rx to full duplex, don't accept runt, errored, long or length * range errored packets. Use 512 byte DMA. */ /* Ramit : 1024 DMA is not a good idea, it ends up banging * some DELL and COMPAQ SMP systems * Turn on ALP, only we are accpeting Jumbo Packets */ writel(RXCFG_AEP | RXCFG_ARP | RXCFG_AIRL | RXCFG_RX_FD | RXCFG_STRIPCRC //| RXCFG_ALP | (RXCFG_MXDMA512) | 0, dev->base + RXCFG); /* Disable priority queueing */ writel(0, dev->base + PQCR); /* Enable IP checksum validation and detetion of VLAN headers. * Note: do not set the reject options as at least the 0x102 * revision of the chip does not properly accept IP fragments * at least for UDP. */ /* Ramit : Be sure to turn on RXCFG_ARP if VLAN's are enabled, since * the MAC it calculates the packetsize AFTER stripping the VLAN * header, and if a VLAN Tagged packet of 64 bytes is received (like * a ping with a VLAN header) then the card, strips the 4 byte VLAN * tag and then checks the packet size, so if RXCFG_ARP is not enabled, * it discrards it!. These guys...... 
* also turn on tag stripping if hardware acceleration is enabled */ #ifdef NS83820_VLAN_ACCEL_SUPPORT #define VRCR_INIT_VALUE (VRCR_IPEN|VRCR_VTDEN|VRCR_VTREN) #else #define VRCR_INIT_VALUE (VRCR_IPEN|VRCR_VTDEN) #endif writel(VRCR_INIT_VALUE, dev->base + VRCR); /* Enable per-packet TCP/UDP/IP checksumming * and per packet vlan tag insertion if * vlan hardware acceleration is enabled */ #ifdef NS83820_VLAN_ACCEL_SUPPORT #define VTCR_INIT_VALUE (VTCR_PPCHK|VTCR_VPPTI) #else #define VTCR_INIT_VALUE VTCR_PPCHK #endif writel(VTCR_INIT_VALUE, dev->base + VTCR); /* Ramit : Enable async and sync pause frames */ /* writel(0, dev->base + PCR); */ writel((PCR_PS_MCAST | PCR_PS_DA | PCR_PSEN | PCR_FFLO_4K | PCR_FFHI_8K | PCR_STLO_4 | PCR_STHI_8 | PCR_PAUSE_CNT), dev->base + PCR); /* Disable Wake On Lan */ writel(0, dev->base + WCSR); ns83820_getmac(dev, ndev); /* Yes, we support dumb IP checksum on transmit */ ndev->features |= NETIF_F_SG; ndev->features |= NETIF_F_IP_CSUM; ndev->min_mtu = 0; #ifdef NS83820_VLAN_ACCEL_SUPPORT /* We also support hardware vlan acceleration */ ndev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; #endif if (using_dac) { printk(KERN_INFO "%s: using 64 bit addressing.\n", ndev->name); ndev->features |= NETIF_F_HIGHDMA; } printk(KERN_INFO "%s: ns83820 v" VERSION ": DP83820 v%u.%u: %pM io=0x%08lx irq=%d f=%s\n", ndev->name, (unsigned)readl(dev->base + SRR) >> 8, (unsigned)readl(dev->base + SRR) & 0xff, ndev->dev_addr, addr, pci_dev->irq, (ndev->features & NETIF_F_HIGHDMA) ? "h,sg" : "sg" ); #ifdef PHY_CODE_IS_FINISHED ns83820_probe_phy(ndev); #endif err = register_netdevice(ndev); if (err) { printk(KERN_INFO "ns83820: unable to register netdev: %d\n", err); goto out_cleanup; } rtnl_unlock(); return 0; out_cleanup: ns83820_disable_interrupts(dev); /* paranoia */ out_free_irq: rtnl_unlock(); free_irq(pci_dev->irq, ndev); out_disable: if (dev->base) iounmap(dev->base); dma_free_coherent(&pci_dev->dev, 4 * DESC_SIZE * NR_TX_DESC, dev->tx_descs, dev->tx_phy_descs); dma_free_coherent(&pci_dev->dev, 4 * DESC_SIZE * NR_RX_DESC, dev->rx_info.descs, dev->rx_info.phy_descs); pci_disable_device(pci_dev); out_free: free_netdev(ndev); out: return err; } static void ns83820_remove_one(struct pci_dev *pci_dev) { struct net_device *ndev = pci_get_drvdata(pci_dev); struct ns83820 *dev = PRIV(ndev); /* ok even if NULL */ if (!ndev) /* paranoia */ return; ns83820_disable_interrupts(dev); /* paranoia */ unregister_netdev(ndev); free_irq(dev->pci_dev->irq, ndev); iounmap(dev->base); dma_free_coherent(&dev->pci_dev->dev, 4 * DESC_SIZE * NR_TX_DESC, dev->tx_descs, dev->tx_phy_descs); dma_free_coherent(&dev->pci_dev->dev, 4 * DESC_SIZE * NR_RX_DESC, dev->rx_info.descs, dev->rx_info.phy_descs); pci_disable_device(dev->pci_dev); free_netdev(ndev); } static const struct pci_device_id ns83820_pci_tbl[] = { { 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, }, { 0, }, }; static struct pci_driver driver = { .name = "ns83820", .id_table = ns83820_pci_tbl, .probe = ns83820_init_one, .remove = ns83820_remove_one, #if 0 /* FIXME: implement */ .suspend = , .resume = , #endif }; static int __init ns83820_init(void) { printk(KERN_INFO "ns83820.c: National Semiconductor DP83820 10/100/1000 driver.\n"); return pci_register_driver(&driver); } static void __exit ns83820_exit(void) { pci_unregister_driver(&driver); } MODULE_AUTHOR("Benjamin LaHaise <[email protected]>"); MODULE_DESCRIPTION("National Semiconductor DP83820 10/100/1000 driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, 
ns83820_pci_tbl);

module_param(lnksts, int, 0);
MODULE_PARM_DESC(lnksts, "Polarity of LNKSTS bit");
module_param(ihr, int, 0);
MODULE_PARM_DESC(ihr, "Time in 100 us increments to delay interrupts (range 0-127)");
module_param(reset_phy, int, 0);
MODULE_PARM_DESC(reset_phy, "Set to 1 to reset the PHY on startup");

module_init(ns83820_init);
module_exit(ns83820_exit);
linux-master
drivers/net/ethernet/natsemi/ns83820.c
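The ns83820 transmit path above gates netif_stop_queue()/netif_wake_queue() on a modular free-slot count, nr_free = (tx_done_idx + NR_TX_DESC - 2 - tx_free_idx) % NR_TX_DESC, and start_tx_okay() requires that count to stay above MIN_TX_DESC_FREE. The small user-space sketch below merely exercises that arithmetic with the ring constants copied from the driver; tx_slots_free() and the example index values are illustrative additions, not part of the source.

#include <assert.h>
#include <stdio.h>

#define NR_TX_DESC		128	/* ring size, as in the driver */
#define MIN_TX_DESC_FREE	8	/* queue-wake margin, as in the driver */

/* Descriptors still available to the producer, given the consumer index
 * (tx_done_idx) and producer index (tx_free_idx).  The "- 2" keeps a guard
 * gap so a completely full ring is never mistaken for an empty one.
 */
static unsigned int tx_slots_free(unsigned int tx_done_idx,
				  unsigned int tx_free_idx)
{
	return (tx_done_idx + NR_TX_DESC - 2 - tx_free_idx) % NR_TX_DESC;
}

int main(void)
{
	/* Fresh ring: both indices at zero, everything but the guard gap is free. */
	assert(tx_slots_free(0, 0) == NR_TX_DESC - 2);

	/* 95 descriptors in flight (done = 5, free = 100). */
	assert(tx_slots_free(5, 100) == NR_TX_DESC - 2 - 95);

	/* Producer has wrapped past the end of the ring (done = 100, free = 5):
	 * 33 descriptors are in flight, so 93 remain usable.
	 */
	assert(tx_slots_free(100, 5) == 93);

	/* The driver restarts the queue only while this margin holds. */
	printf("queue ok? %d\n", tx_slots_free(100, 5) > MIN_TX_DESC_FREE);
	return 0;
}

The same expression is re-evaluated (via start_tx_okay()) after transmit completions before the queue is woken, so producer and consumer never agree to reuse the two guard slots.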
/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */ /* Written/copyright 1999-2001 by Donald Becker. Portions copyright (c) 2001,2002 Sun Microsystems ([email protected]) Portions copyright 2001,2002 Manfred Spraul ([email protected]) Portions copyright 2004 Harald Welte <[email protected]> This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on or derived from this code fall under the GPL and must retain the authorship, copyright and license notice. This file is not a complete program and may only be used when the entire operating system is licensed under the GPL. License for under other terms may be available. Contact the original author for details. The original author may be reached as [email protected], or at Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 Support information and updates available at http://www.scyld.com/network/netsemi.html [link no longer provides useful info -jgarzik] TODO: * big endian support with CFG:BEM instead of cpu_to_le32 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/delay.h> #include <linux/rtnetlink.h> #include <linux/mii.h> #include <linux/crc32.h> #include <linux/bitops.h> #include <linux/prefetch.h> #include <asm/processor.h> /* Processor type for cache alignment. */ #include <asm/io.h> #include <asm/irq.h> #include <linux/uaccess.h> #define DRV_NAME "natsemi" #define DRV_VERSION "2.1" #define DRV_RELDATE "Sept 11, 2006" #define RX_OFFSET 2 /* Updated to recommendations in pci-skeleton v2.03. */ /* The user-configurable values. These may be modified when a driver module is loaded.*/ #define NATSEMI_DEF_MSG (NETIF_MSG_DRV | \ NETIF_MSG_LINK | \ NETIF_MSG_WOL | \ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) static int debug = -1; static int mtu; /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). This chip uses a 512 element hash table based on the Ethernet CRC. */ static const int multicast_filter_limit = 100; /* Set the copy breakpoint for the copy-only-tiny-frames scheme. Setting to > 1518 effectively disables this feature. */ static int rx_copybreak; static int dspcfg_workaround = 1; /* Used to pass the media type, etc. Both 'options[]' and 'full_duplex[]' should exist for driver interoperability. The media type is usually passed in 'options[]'. */ #define MAX_UNITS 8 /* More are supported, limit only on options */ static int options[MAX_UNITS]; static int full_duplex[MAX_UNITS]; /* Operational parameters that are set at compile time. */ /* Keep the ring sizes a power of two for compile efficiency. The compiler will convert <unsigned>'%'<2^N> into a bit mask. Making the Tx ring too large decreases the effectiveness of channel bonding and packet priority. There are no ill effects from too-large receive rings. */ #define TX_RING_SIZE 16 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */ #define RX_RING_SIZE 32 /* Operational parameters that usually are not changed. */ /* Time in jiffies before concluding the transmitter is hung. 
*/ #define TX_TIMEOUT (2*HZ) #define NATSEMI_HW_TIMEOUT 400 #define NATSEMI_TIMER_FREQ 5*HZ #define NATSEMI_PG0_NREGS 64 #define NATSEMI_RFDR_NREGS 8 #define NATSEMI_PG1_NREGS 4 #define NATSEMI_NREGS (NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \ NATSEMI_PG1_NREGS) #define NATSEMI_REGS_VER 1 /* v1 added RFDR registers */ #define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32)) /* Buffer sizes: * The nic writes 32-bit values, even if the upper bytes of * a 32-bit value are beyond the end of the buffer. */ #define NATSEMI_HEADERS 22 /* 2*mac,type,vlan,crc */ #define NATSEMI_PADDING 16 /* 2 bytes should be sufficient */ #define NATSEMI_LONGPKT 1518 /* limit for normal packets */ #define NATSEMI_RX_LIMIT 2046 /* maximum supported by hardware */ /* These identify the driver base version and may not be removed. */ static const char version[] = KERN_INFO DRV_NAME " dp8381x driver, version " DRV_VERSION ", " DRV_RELDATE "\n" " originally by Donald Becker <[email protected]>\n" " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n"; MODULE_AUTHOR("Donald Becker <[email protected]>"); MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver"); MODULE_LICENSE("GPL"); module_param(mtu, int, 0); module_param(debug, int, 0); module_param(rx_copybreak, int, 0); module_param(dspcfg_workaround, int, 0); module_param_array(options, int, NULL, 0); module_param_array(full_duplex, int, NULL, 0); MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)"); MODULE_PARM_DESC(debug, "DP8381x default debug level"); MODULE_PARM_DESC(rx_copybreak, "DP8381x copy breakpoint for copy-only-tiny-frames"); MODULE_PARM_DESC(dspcfg_workaround, "DP8381x: control DspCfg workaround"); MODULE_PARM_DESC(options, "DP8381x: Bits 0-3: media type, bit 17: full duplex"); MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)"); /* Theory of Operation I. Board Compatibility This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC. It also works with other chips in the DP83810 series. II. Board-specific settings This driver requires the PCI interrupt line to be valid. It honors the EEPROM-set values. III. Driver operation IIIa. Ring buffers This driver uses two statically allocated fixed-size descriptor lists formed into rings by a branch from the final descriptor to the beginning of the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. The NatSemi design uses a 'next descriptor' pointer that the driver forms into a list. IIIb/c. Transmit/Receive Structure This driver uses a zero-copy receive and transmit scheme. The driver allocates full frame size skbuffs for the Rx ring buffers at open() time and passes the skb->data field to the chip as receive data buffers. When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is copied to the new skbuff. When the incoming frame is larger, the skbuff is passed directly up the protocol stack. Buffers consumed this way are replaced by newly allocated skbuffs in a later phase of receives. The RX_COPYBREAK value is chosen to trade-off the memory wasted by using a full-sized skbuff for small frames vs. the copying costs of larger frames. New boards are typically used in generously configured machines and the underfilled buffers have negligible impact compared to the benefit of a single allocation size, so the default value of zero results in never copying packets. When copying is done, the cost is usually mitigated by using a combined copy/checksum routine. 
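   As a sketch only (this is not additional driver code, just the shape of the
   decision made in netdev_rx() below):

	if (pkt_len < rx_copybreak) {
		skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET);
		skb_reserve(skb, RX_OFFSET);	   16-byte align the IP header
		copy pkt_len bytes out of the ring skbuff, leave the ring as-is
	} else {
		hand the full-sized ring skbuff up and refill that ring slot
	}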
Copying also preloads the cache, which is most useful with small frames. A subtle aspect of the operation is that unaligned buffers are not permitted by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't longword aligned for further processing. On copies frames are put into the skbuff at an offset of "+2", 16-byte aligning the IP header. IIId. Synchronization Most operations are synchronized on the np->lock irq spinlock, except the receive and transmit paths which are synchronised using a combination of hardware descriptor ownership, disabling interrupts and NAPI poll scheduling. IVb. References http://www.scyld.com/expert/100mbps.html http://www.scyld.com/expert/NWay.html Datasheet is available from: http://www.national.com/pf/DP/DP83815.html IVc. Errata None characterised. */ /* * Support for fibre connections on Am79C874: * This phy needs a special setup when connected to a fibre cable. * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/22235.pdf */ #define PHYID_AM79C874 0x0022561b enum { MII_MCTRL = 0x15, /* mode control register */ MII_FX_SEL = 0x0001, /* 100BASE-FX (fiber) */ MII_EN_SCRM = 0x0004, /* enable scrambler (tp) */ }; enum { NATSEMI_FLAG_IGNORE_PHY = 0x1, }; /* array of board data directly indexed by pci_tbl[x].driver_data */ static struct { const char *name; unsigned long flags; unsigned int eeprom_size; } natsemi_pci_info[] = { { "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 }, { "NatSemi DP8381[56]", 0, 24 }, }; static const struct pci_device_id natsemi_pci_tbl[] = { { PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 }, { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, { } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl); /* Offsets to the device registers. Unlike software-only systems, device drivers interact with complex hardware. It's not useful to define symbolic names for every register bit in the device. */ enum register_offsets { ChipCmd = 0x00, ChipConfig = 0x04, EECtrl = 0x08, PCIBusCfg = 0x0C, IntrStatus = 0x10, IntrMask = 0x14, IntrEnable = 0x18, IntrHoldoff = 0x1C, /* DP83816 only */ TxRingPtr = 0x20, TxConfig = 0x24, RxRingPtr = 0x30, RxConfig = 0x34, ClkRun = 0x3C, WOLCmd = 0x40, PauseCmd = 0x44, RxFilterAddr = 0x48, RxFilterData = 0x4C, BootRomAddr = 0x50, BootRomData = 0x54, SiliconRev = 0x58, StatsCtrl = 0x5C, StatsData = 0x60, RxPktErrs = 0x60, RxMissed = 0x68, RxCRCErrs = 0x64, BasicControl = 0x80, BasicStatus = 0x84, AnegAdv = 0x90, AnegPeer = 0x94, PhyStatus = 0xC0, MIntrCtrl = 0xC4, MIntrStatus = 0xC8, PhyCtrl = 0xE4, /* These are from the spec, around page 78... on a separate table. * The meaning of these registers depend on the value of PGSEL. 
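 * As an illustration (repeating the PGSEL bracketing already used by
 * init_phy_fixup() and do_cable_magic() below), access to them looks like:
 *
 *	writew(1, ioaddr + PGSEL);		expose PMDCSR/TSTDAT/DSPCFG/SDCFG
 *	writew(PMDCSR_VAL, ioaddr + PMDCSR);
 *	...
 *	writew(0, ioaddr + PGSEL);		back to the normal register map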
*/ PGSEL = 0xCC, PMDCSR = 0xE4, TSTDAT = 0xFC, DSPCFG = 0xF4, SDCFG = 0xF8 }; /* the values for the 'magic' registers above (PGSEL=1) */ #define PMDCSR_VAL 0x189c /* enable preferred adaptation circuitry */ #define TSTDAT_VAL 0x0 #define DSPCFG_VAL 0x5040 #define SDCFG_VAL 0x008c /* set voltage thresholds for Signal Detect */ #define DSPCFG_LOCK 0x20 /* coefficient lock bit in DSPCFG */ #define DSPCFG_COEF 0x1000 /* see coefficient (in TSTDAT) bit in DSPCFG */ #define TSTDAT_FIXED 0xe8 /* magic number for bad coefficients */ /* misc PCI space registers */ enum pci_register_offsets { PCIPM = 0x44, }; enum ChipCmd_bits { ChipReset = 0x100, RxReset = 0x20, TxReset = 0x10, RxOff = 0x08, RxOn = 0x04, TxOff = 0x02, TxOn = 0x01, }; enum ChipConfig_bits { CfgPhyDis = 0x200, CfgPhyRst = 0x400, CfgExtPhy = 0x1000, CfgAnegEnable = 0x2000, CfgAneg100 = 0x4000, CfgAnegFull = 0x8000, CfgAnegDone = 0x8000000, CfgFullDuplex = 0x20000000, CfgSpeed100 = 0x40000000, CfgLink = 0x80000000, }; enum EECtrl_bits { EE_ShiftClk = 0x04, EE_DataIn = 0x01, EE_ChipSelect = 0x08, EE_DataOut = 0x02, MII_Data = 0x10, MII_Write = 0x20, MII_ShiftClk = 0x40, }; enum PCIBusCfg_bits { EepromReload = 0x4, }; /* Bits in the interrupt status/mask registers. */ enum IntrStatus_bits { IntrRxDone = 0x0001, IntrRxIntr = 0x0002, IntrRxErr = 0x0004, IntrRxEarly = 0x0008, IntrRxIdle = 0x0010, IntrRxOverrun = 0x0020, IntrTxDone = 0x0040, IntrTxIntr = 0x0080, IntrTxErr = 0x0100, IntrTxIdle = 0x0200, IntrTxUnderrun = 0x0400, StatsMax = 0x0800, SWInt = 0x1000, WOLPkt = 0x2000, LinkChange = 0x4000, IntrHighBits = 0x8000, RxStatusFIFOOver = 0x10000, IntrPCIErr = 0xf00000, RxResetDone = 0x1000000, TxResetDone = 0x2000000, IntrAbnormalSummary = 0xCD20, }; /* * Default Interrupts: * Rx OK, Rx Packet Error, Rx Overrun, * Tx OK, Tx Packet Error, Tx Underrun, * MIB Service, Phy Interrupt, High Bits, * Rx Status FIFO overrun, * Received Target Abort, Received Master Abort, * Signalled System Error, Received Parity Error */ #define DEFAULT_INTR 0x00f1cd65 enum TxConfig_bits { TxDrthMask = 0x3f, TxFlthMask = 0x3f00, TxMxdmaMask = 0x700000, TxMxdma_512 = 0x0, TxMxdma_4 = 0x100000, TxMxdma_8 = 0x200000, TxMxdma_16 = 0x300000, TxMxdma_32 = 0x400000, TxMxdma_64 = 0x500000, TxMxdma_128 = 0x600000, TxMxdma_256 = 0x700000, TxCollRetry = 0x800000, TxAutoPad = 0x10000000, TxMacLoop = 0x20000000, TxHeartIgn = 0x40000000, TxCarrierIgn = 0x80000000 }; /* * Tx Configuration: * - 256 byte DMA burst length * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free) * - 64 bytes initial drain threshold (i.e. begin actual transmission * when 64 byte are in the fifo) * - on tx underruns, increase drain threshold by 64. * - at most use a drain threshold of 1472 bytes: The sum of the fill * threshold and the drain threshold must be less than 2016 bytes. 
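 *
 * For illustration, these thresholds are programmed in units of 32 bytes,
 * so the defines below work out to:
 *
 *	TX_FLTH_VAL       = (512/32) << 8 = 0x10 << 8	(fill threshold field)
 *	TX_DRTH_VAL_START =   64/32       = 2
 *	TX_DRTH_VAL_LIMIT = 1472/32       = 46
 *
 * and 512 + 1472 = 1984 < 2016, so even the maximum drain threshold stays
 * within the fill + drain limit stated above.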
* */ #define TX_FLTH_VAL ((512/32) << 8) #define TX_DRTH_VAL_START (64/32) #define TX_DRTH_VAL_INC 2 #define TX_DRTH_VAL_LIMIT (1472/32) enum RxConfig_bits { RxDrthMask = 0x3e, RxMxdmaMask = 0x700000, RxMxdma_512 = 0x0, RxMxdma_4 = 0x100000, RxMxdma_8 = 0x200000, RxMxdma_16 = 0x300000, RxMxdma_32 = 0x400000, RxMxdma_64 = 0x500000, RxMxdma_128 = 0x600000, RxMxdma_256 = 0x700000, RxAcceptLong = 0x8000000, RxAcceptTx = 0x10000000, RxAcceptRunt = 0x40000000, RxAcceptErr = 0x80000000 }; #define RX_DRTH_VAL (128/8) enum ClkRun_bits { PMEEnable = 0x100, PMEStatus = 0x8000, }; enum WolCmd_bits { WakePhy = 0x1, WakeUnicast = 0x2, WakeMulticast = 0x4, WakeBroadcast = 0x8, WakeArp = 0x10, WakePMatch0 = 0x20, WakePMatch1 = 0x40, WakePMatch2 = 0x80, WakePMatch3 = 0x100, WakeMagic = 0x200, WakeMagicSecure = 0x400, SecureHack = 0x100000, WokePhy = 0x400000, WokeUnicast = 0x800000, WokeMulticast = 0x1000000, WokeBroadcast = 0x2000000, WokeArp = 0x4000000, WokePMatch0 = 0x8000000, WokePMatch1 = 0x10000000, WokePMatch2 = 0x20000000, WokePMatch3 = 0x40000000, WokeMagic = 0x80000000, WakeOptsSummary = 0x7ff }; enum RxFilterAddr_bits { RFCRAddressMask = 0x3ff, AcceptMulticast = 0x00200000, AcceptMyPhys = 0x08000000, AcceptAllPhys = 0x10000000, AcceptAllMulticast = 0x20000000, AcceptBroadcast = 0x40000000, RxFilterEnable = 0x80000000 }; enum StatsCtrl_bits { StatsWarn = 0x1, StatsFreeze = 0x2, StatsClear = 0x4, StatsStrobe = 0x8, }; enum MIntrCtrl_bits { MICRIntEn = 0x2, }; enum PhyCtrl_bits { PhyAddrMask = 0x1f, }; #define PHY_ADDR_NONE 32 #define PHY_ADDR_INTERNAL 1 /* values we might find in the silicon revision register */ #define SRR_DP83815_C 0x0302 #define SRR_DP83815_D 0x0403 #define SRR_DP83816_A4 0x0504 #define SRR_DP83816_A5 0x0505 /* The Rx and Tx buffer descriptors. */ /* Note that using only 32 bit fields simplifies conversion to big-endian architectures. */ struct netdev_desc { __le32 next_desc; __le32 cmd_status; __le32 addr; __le32 software_use; }; /* Bits in network_desc.status */ enum desc_status_bits { DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000, DescNoCRC=0x10000000, DescPktOK=0x08000000, DescSizeMask=0xfff, DescTxAbort=0x04000000, DescTxFIFO=0x02000000, DescTxCarrier=0x01000000, DescTxDefer=0x00800000, DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000, DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000, DescRxAbort=0x04000000, DescRxOver=0x02000000, DescRxDest=0x01800000, DescRxLong=0x00400000, DescRxRunt=0x00200000, DescRxInvalid=0x00100000, DescRxCRC=0x00080000, DescRxAlign=0x00040000, DescRxLoop=0x00020000, DesRxColl=0x00010000, }; struct netdev_private { /* Descriptor rings first for alignment */ dma_addr_t ring_dma; struct netdev_desc *rx_ring; struct netdev_desc *tx_ring; /* The addresses of receive-in-place skbuffs */ struct sk_buff *rx_skbuff[RX_RING_SIZE]; dma_addr_t rx_dma[RX_RING_SIZE]; /* address of a sent-in-place packet/buffer, for later free() */ struct sk_buff *tx_skbuff[TX_RING_SIZE]; dma_addr_t tx_dma[TX_RING_SIZE]; struct net_device *dev; void __iomem *ioaddr; struct napi_struct napi; /* Media monitoring timer */ struct timer_list timer; /* Frequently used values: keep some adjacent for cache effect */ struct pci_dev *pci_dev; struct netdev_desc *rx_head_desc; /* Producer/consumer ring indices */ unsigned int cur_rx, dirty_rx; unsigned int cur_tx, dirty_tx; /* Based on MTU+slack. 
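	   Illustrative note: with the default 1500-byte MTU this works out to
	   ETH_DATA_LEN + NATSEMI_HEADERS = 1500 + 22 = 1522 bytes; see
	   set_bufsize() below.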
*/ unsigned int rx_buf_sz; int oom; /* Interrupt status */ u32 intr_status; /* Do not touch the nic registers */ int hands_off; /* Don't pay attention to the reported link state. */ int ignore_phy; /* external phy that is used: only valid if dev->if_port != PORT_TP */ int mii; int phy_addr_external; unsigned int full_duplex; /* Rx filter */ u32 cur_rx_mode; u32 rx_filter[16]; /* FIFO and PCI burst thresholds */ u32 tx_config, rx_config; /* original contents of ClkRun register */ u32 SavedClkRun; /* silicon revision */ u32 srr; /* expected DSPCFG value */ u16 dspcfg; int dspcfg_workaround; /* parms saved in ethtool format */ u16 speed; /* The forced speed, 10Mb, 100Mb, gigabit */ u8 duplex; /* Duplex, half or full */ u8 autoneg; /* Autonegotiation enabled */ /* MII transceiver section */ u16 advertising; unsigned int iosize; spinlock_t lock; u32 msg_enable; /* EEPROM data */ int eeprom_size; }; static void move_int_phy(struct net_device *dev, int addr); static int eeprom_read(void __iomem *ioaddr, int location); static int mdio_read(struct net_device *dev, int reg); static void mdio_write(struct net_device *dev, int reg, u16 data); static void init_phy_fixup(struct net_device *dev); static int miiport_read(struct net_device *dev, int phy_id, int reg); static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data); static int find_mii(struct net_device *dev); static void natsemi_reset(struct net_device *dev); static void natsemi_reload_eeprom(struct net_device *dev); static void natsemi_stop_rxtx(struct net_device *dev); static int netdev_open(struct net_device *dev); static void do_cable_magic(struct net_device *dev); static void undo_cable_magic(struct net_device *dev); static void check_link(struct net_device *dev); static void netdev_timer(struct timer_list *t); static void dump_ring(struct net_device *dev); static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue); static int alloc_ring(struct net_device *dev); static void refill_rx(struct net_device *dev); static void init_ring(struct net_device *dev); static void drain_tx(struct net_device *dev); static void drain_ring(struct net_device *dev); static void free_ring(struct net_device *dev); static void reinit_ring(struct net_device *dev); static void init_registers(struct net_device *dev); static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); static irqreturn_t intr_handler(int irq, void *dev_instance); static void netdev_error(struct net_device *dev, int intr_status); static int natsemi_poll(struct napi_struct *napi, int budget); static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do); static void netdev_tx_done(struct net_device *dev); static int natsemi_change_mtu(struct net_device *dev, int new_mtu); #ifdef CONFIG_NET_POLL_CONTROLLER static void natsemi_poll_controller(struct net_device *dev); #endif static void __set_rx_mode(struct net_device *dev); static void set_rx_mode(struct net_device *dev); static void __get_stats(struct net_device *dev); static struct net_device_stats *get_stats(struct net_device *dev); static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static int netdev_set_wol(struct net_device *dev, u32 newval); static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur); static int netdev_set_sopass(struct net_device *dev, u8 *newval); static int netdev_get_sopass(struct net_device *dev, u8 *data); static int netdev_get_ecmd(struct net_device *dev, struct ethtool_link_ksettings *ecmd); static int 
netdev_set_ecmd(struct net_device *dev, const struct ethtool_link_ksettings *ecmd); static void enable_wol_mode(struct net_device *dev, int enable_intr); static int netdev_close(struct net_device *dev); static int netdev_get_regs(struct net_device *dev, u8 *buf); static int netdev_get_eeprom(struct net_device *dev, u8 *buf); static const struct ethtool_ops ethtool_ops; #define NATSEMI_ATTR(_name) \ static ssize_t natsemi_show_##_name(struct device *dev, \ struct device_attribute *attr, char *buf); \ static ssize_t natsemi_set_##_name(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count); \ static DEVICE_ATTR(_name, 0644, natsemi_show_##_name, natsemi_set_##_name) #define NATSEMI_CREATE_FILE(_dev, _name) \ device_create_file(&_dev->dev, &dev_attr_##_name) #define NATSEMI_REMOVE_FILE(_dev, _name) \ device_remove_file(&_dev->dev, &dev_attr_##_name) NATSEMI_ATTR(dspcfg_workaround); static ssize_t natsemi_show_dspcfg_workaround(struct device *dev, struct device_attribute *attr, char *buf) { struct netdev_private *np = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%s\n", np->dspcfg_workaround ? "on" : "off"); } static ssize_t natsemi_set_dspcfg_workaround(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct netdev_private *np = netdev_priv(to_net_dev(dev)); int new_setting; unsigned long flags; /* Find out the new setting */ if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1)) new_setting = 1; else if (!strncmp("off", buf, count - 1) || !strncmp("0", buf, count - 1)) new_setting = 0; else return count; spin_lock_irqsave(&np->lock, flags); np->dspcfg_workaround = new_setting; spin_unlock_irqrestore(&np->lock, flags); return count; } static inline void __iomem *ns_ioaddr(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); return np->ioaddr; } static inline void natsemi_irq_enable(struct net_device *dev) { writel(1, ns_ioaddr(dev) + IntrEnable); readl(ns_ioaddr(dev) + IntrEnable); } static inline void natsemi_irq_disable(struct net_device *dev) { writel(0, ns_ioaddr(dev) + IntrEnable); readl(ns_ioaddr(dev) + IntrEnable); } static void move_int_phy(struct net_device *dev, int addr) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = ns_ioaddr(dev); int target = 31; /* * The internal phy is visible on the external mii bus. Therefore we must * move it away before we can send commands to an external phy. * There are two addresses we must avoid: * - the address on the external phy that is used for transmission. * - the address that we want to access. User space can access phys * on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independent from the * phy that is used for transmission. */ if (target == addr) target--; if (target == np->phy_addr_external) target--; writew(target, ioaddr + PhyCtrl); readw(ioaddr + PhyCtrl); udelay(1); } static void natsemi_init_media(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); u32 tmp; if (np->ignore_phy) netif_carrier_on(dev); else netif_carrier_off(dev); /* get the initial settings from hardware */ tmp = mdio_read(dev, MII_BMCR); np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10; np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF; np->autoneg = (tmp & BMCR_ANENABLE)? 
AUTONEG_ENABLE: AUTONEG_DISABLE; np->advertising= mdio_read(dev, MII_ADVERTISE); if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL && netif_msg_probe(np)) { printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s " "10%s %s duplex.\n", pci_name(np->pci_dev), (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)? "enabled, advertise" : "disabled, force", (np->advertising & (ADVERTISE_100FULL|ADVERTISE_100HALF))? "0" : "", (np->advertising & (ADVERTISE_100FULL|ADVERTISE_10FULL))? "full" : "half"); } if (netif_msg_probe(np)) printk(KERN_INFO "natsemi %s: Transceiver status %#04x advertising %#04x.\n", pci_name(np->pci_dev), mdio_read(dev, MII_BMSR), np->advertising); } static const struct net_device_ops natsemi_netdev_ops = { .ndo_open = netdev_open, .ndo_stop = netdev_close, .ndo_start_xmit = start_tx, .ndo_get_stats = get_stats, .ndo_set_rx_mode = set_rx_mode, .ndo_change_mtu = natsemi_change_mtu, .ndo_eth_ioctl = netdev_ioctl, .ndo_tx_timeout = ns_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = natsemi_poll_controller, #endif }; static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev; struct netdev_private *np; int i, option, irq, chip_idx = ent->driver_data; static int find_cnt = -1; resource_size_t iostart; unsigned long iosize; void __iomem *ioaddr; const int pcibar = 1; /* PCI base address register */ u8 addr[ETH_ALEN]; int prev_eedata; u32 tmp; /* when built into the kernel, we only print version if device is found */ #ifndef MODULE static int printed_version; if (!printed_version++) printk(version); #endif i = pcim_enable_device(pdev); if (i) return i; /* natsemi has a non-standard PM control register * in PCI config space. Some boards apparently need * to be brought to D0 in this manner. */ pci_read_config_dword(pdev, PCIPM, &tmp); if (tmp & PCI_PM_CTRL_STATE_MASK) { /* D0 state, disable PME assertion */ u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK; pci_write_config_dword(pdev, PCIPM, newtmp); } find_cnt++; iostart = pci_resource_start(pdev, pcibar); iosize = pci_resource_len(pdev, pcibar); irq = pdev->irq; pci_set_master(pdev); dev = alloc_etherdev(sizeof (struct netdev_private)); if (!dev) return -ENOMEM; SET_NETDEV_DEV(dev, &pdev->dev); i = pci_request_regions(pdev, DRV_NAME); if (i) goto err_pci_request_regions; ioaddr = ioremap(iostart, iosize); if (!ioaddr) { i = -ENOMEM; goto err_pci_request_regions; } /* Work around the dropped serial bit. */ prev_eedata = eeprom_read(ioaddr, 6); for (i = 0; i < 3; i++) { int eedata = eeprom_read(ioaddr, i + 7); addr[i*2] = (eedata << 1) + (prev_eedata >> 15); addr[i*2+1] = eedata >> 7; prev_eedata = eedata; } eth_hw_addr_set(dev, addr); np = netdev_priv(dev); np->ioaddr = ioaddr; netif_napi_add(dev, &np->napi, natsemi_poll); np->dev = dev; np->pci_dev = pdev; pci_set_drvdata(pdev, dev); np->iosize = iosize; spin_lock_init(&np->lock); np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG; np->hands_off = 0; np->intr_status = 0; np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size; if (natsemi_pci_info[chip_idx].flags & NATSEMI_FLAG_IGNORE_PHY) np->ignore_phy = 1; else np->ignore_phy = 0; np->dspcfg_workaround = dspcfg_workaround; /* Initial port: * - If configured to ignore the PHY set up for external. * - If the nic was configured to use an external phy and if find_mii * finds a phy: use external port, first phy that replies. * - Otherwise: internal port. 
* Note that the phy address for the internal phy doesn't matter: * The address would be used to access a phy over the mii bus, but * the internal phy is accessed through mapped registers. */ if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy) dev->if_port = PORT_MII; else dev->if_port = PORT_TP; /* Reset the chip to erase previous misconfiguration. */ natsemi_reload_eeprom(dev); natsemi_reset(dev); if (dev->if_port != PORT_TP) { np->phy_addr_external = find_mii(dev); /* If we're ignoring the PHY it doesn't matter if we can't * find one. */ if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) { dev->if_port = PORT_TP; np->phy_addr_external = PHY_ADDR_INTERNAL; } } else { np->phy_addr_external = PHY_ADDR_INTERNAL; } option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; /* The lower four bits are the media type. */ if (option) { if (option & 0x200) np->full_duplex = 1; if (option & 15) printk(KERN_INFO "natsemi %s: ignoring user supplied media type %d", pci_name(np->pci_dev), option & 15); } if (find_cnt < MAX_UNITS && full_duplex[find_cnt]) np->full_duplex = 1; dev->netdev_ops = &natsemi_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; dev->ethtool_ops = &ethtool_ops; /* MTU range: 64 - 2024 */ dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN; dev->max_mtu = NATSEMI_RX_LIMIT - NATSEMI_HEADERS; if (mtu) dev->mtu = mtu; natsemi_init_media(dev); /* save the silicon revision for later querying */ np->srr = readl(ioaddr + SiliconRev); if (netif_msg_hw(np)) printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n", pci_name(np->pci_dev), np->srr); i = register_netdev(dev); if (i) goto err_register_netdev; i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround); if (i) goto err_create_file; if (netif_msg_drv(np)) { printk(KERN_INFO "natsemi %s: %s at %#08llx " "(%s), %pM, IRQ %d", dev->name, natsemi_pci_info[chip_idx].name, (unsigned long long)iostart, pci_name(np->pci_dev), dev->dev_addr, irq); if (dev->if_port == PORT_TP) printk(", port TP.\n"); else if (np->ignore_phy) printk(", port MII, ignoring PHY\n"); else printk(", port MII, phy ad %d.\n", np->phy_addr_external); } return 0; err_create_file: unregister_netdev(dev); err_register_netdev: iounmap(ioaddr); err_pci_request_regions: free_netdev(dev); return i; } /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */ /* Delay between EEPROM clock transitions. No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that made udelay() unreliable. */ #define eeprom_delay(ee_addr) readl(ee_addr) #define EE_Write0 (EE_ChipSelect) #define EE_Write1 (EE_ChipSelect | EE_DataIn) /* The EEPROM commands include the alway-set leading bit. */ enum EEPROM_Cmds { EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6), }; static int eeprom_read(void __iomem *addr, int location) { int i; int retval = 0; void __iomem *ee_addr = addr + EECtrl; int read_cmd = location | EE_ReadCmd; writel(EE_Write0, ee_addr); /* Shift the read command bits out. */ for (i = 10; i >= 0; i--) { short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0; writel(dataval, ee_addr); eeprom_delay(ee_addr); writel(dataval | EE_ShiftClk, ee_addr); eeprom_delay(ee_addr); } writel(EE_ChipSelect, ee_addr); eeprom_delay(ee_addr); for (i = 0; i < 16; i++) { writel(EE_ChipSelect | EE_ShiftClk, ee_addr); eeprom_delay(ee_addr); retval |= (readl(ee_addr) & EE_DataOut) ? 
1 << i : 0; writel(EE_ChipSelect, ee_addr); eeprom_delay(ee_addr); } /* Terminate the EEPROM access. */ writel(EE_Write0, ee_addr); writel(0, ee_addr); return retval; } /* MII transceiver control section. * The 83815 series has an internal transceiver, and we present the * internal management registers as if they were MII connected. * External Phy registers are referenced through the MII interface. */ /* clock transitions >= 20ns (25MHz) * One readl should be good to PCI @ 100MHz */ #define mii_delay(ioaddr) readl(ioaddr + EECtrl) static int mii_getbit (struct net_device *dev) { int data; void __iomem *ioaddr = ns_ioaddr(dev); writel(MII_ShiftClk, ioaddr + EECtrl); data = readl(ioaddr + EECtrl); writel(0, ioaddr + EECtrl); mii_delay(ioaddr); return (data & MII_Data)? 1 : 0; } static void mii_send_bits (struct net_device *dev, u32 data, int len) { u32 i; void __iomem *ioaddr = ns_ioaddr(dev); for (i = (1 << (len-1)); i; i >>= 1) { u32 mdio_val = MII_Write | ((data & i)? MII_Data : 0); writel(mdio_val, ioaddr + EECtrl); mii_delay(ioaddr); writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl); mii_delay(ioaddr); } writel(0, ioaddr + EECtrl); mii_delay(ioaddr); } static int miiport_read(struct net_device *dev, int phy_id, int reg) { u32 cmd; int i; u32 retval = 0; /* Ensure sync */ mii_send_bits (dev, 0xffffffff, 32); /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ /* ST,OP = 0110'b for read operation */ cmd = (0x06 << 10) | (phy_id << 5) | reg; mii_send_bits (dev, cmd, 14); /* Turnaround */ if (mii_getbit (dev)) return 0; /* Read data */ for (i = 0; i < 16; i++) { retval <<= 1; retval |= mii_getbit (dev); } /* End cycle */ mii_getbit (dev); return retval; } static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data) { u32 cmd; /* Ensure sync */ mii_send_bits (dev, 0xffffffff, 32); /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */ cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data; mii_send_bits (dev, cmd, 32); /* End cycle */ mii_getbit (dev); } static int mdio_read(struct net_device *dev, int reg) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = ns_ioaddr(dev); /* The 83815 series has two ports: * - an internal transceiver * - an external mii bus */ if (dev->if_port == PORT_TP) return readw(ioaddr+BasicControl+(reg<<2)); else return miiport_read(dev, np->phy_addr_external, reg); } static void mdio_write(struct net_device *dev, int reg, u16 data) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = ns_ioaddr(dev); /* The 83815 series has an internal transceiver; handle separately */ if (dev->if_port == PORT_TP) writew(data, ioaddr+BasicControl+(reg<<2)); else miiport_write(dev, np->phy_addr_external, reg, data); } static void init_phy_fixup(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = ns_ioaddr(dev); int i; u32 cfg; u16 tmp; /* restore stuff lost when power was out */ tmp = mdio_read(dev, MII_BMCR); if (np->autoneg == AUTONEG_ENABLE) { /* renegotiate if something changed */ if ((tmp & BMCR_ANENABLE) == 0 || np->advertising != mdio_read(dev, MII_ADVERTISE)) { /* turn on autonegotiation and force negotiation */ tmp |= (BMCR_ANENABLE | BMCR_ANRESTART); mdio_write(dev, MII_ADVERTISE, np->advertising); } } else { /* turn off auto negotiation, set speed and duplexity */ tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX); if (np->speed == SPEED_100) tmp |= BMCR_SPEED100; if 
(np->duplex == DUPLEX_FULL) tmp |= BMCR_FULLDPLX; /* * Note: there is no good way to inform the link partner * that our capabilities changed. The user has to unplug * and replug the network cable after some changes, e.g. * after switching from 10HD, autoneg off to 100 HD, * autoneg off. */ } mdio_write(dev, MII_BMCR, tmp); readl(ioaddr + ChipConfig); udelay(1); /* find out what phy this is */ np->mii = (mdio_read(dev, MII_PHYSID1) << 16) + mdio_read(dev, MII_PHYSID2); /* handle external phys here */ switch (np->mii) { case PHYID_AM79C874: /* phy specific configuration for fibre/tp operation */ tmp = mdio_read(dev, MII_MCTRL); tmp &= ~(MII_FX_SEL | MII_EN_SCRM); if (dev->if_port == PORT_FIBRE) tmp |= MII_FX_SEL; else tmp |= MII_EN_SCRM; mdio_write(dev, MII_MCTRL, tmp); break; default: break; } cfg = readl(ioaddr + ChipConfig); if (cfg & CfgExtPhy) return; /* On page 78 of the spec, they recommend some settings for "optimum performance" to be done in sequence. These settings optimize some of the 100Mbit autodetection circuitry. They say we only want to do this for rev C of the chip, but engineers at NSC (Bradley Kennedy) recommends always setting them. If you don't, you get errors on some autonegotiations that make the device unusable. It seems that the DSP needs a few usec to reinitialize after the start of the phy. Just retry writing these values until they stick. */ for (i=0;i<NATSEMI_HW_TIMEOUT;i++) { int dspcfg; writew(1, ioaddr + PGSEL); writew(PMDCSR_VAL, ioaddr + PMDCSR); writew(TSTDAT_VAL, ioaddr + TSTDAT); np->dspcfg = (np->srr <= SRR_DP83815_C)? DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG)); writew(np->dspcfg, ioaddr + DSPCFG); writew(SDCFG_VAL, ioaddr + SDCFG); writew(0, ioaddr + PGSEL); readl(ioaddr + ChipConfig); udelay(10); writew(1, ioaddr + PGSEL); dspcfg = readw(ioaddr + DSPCFG); writew(0, ioaddr + PGSEL); if (np->dspcfg == dspcfg) break; } if (netif_msg_link(np)) { if (i==NATSEMI_HW_TIMEOUT) { printk(KERN_INFO "%s: DSPCFG mismatch after retrying for %d usec.\n", dev->name, i*10); } else { printk(KERN_INFO "%s: DSPCFG accepted after %d usec.\n", dev->name, i*10); } } /* * Enable PHY Specific event based interrupts. Link state change * and Auto-Negotiation Completion are among the affected. * Read the intr status to clear it (needed for wake events). */ readw(ioaddr + MIntrStatus); writew(MICRIntEn, ioaddr + MIntrCtrl); } static int switch_port_external(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = ns_ioaddr(dev); u32 cfg; cfg = readl(ioaddr + ChipConfig); if (cfg & CfgExtPhy) return 0; if (netif_msg_link(np)) { printk(KERN_INFO "%s: switching to external transceiver.\n", dev->name); } /* 1) switch back to external phy */ writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig); readl(ioaddr + ChipConfig); udelay(1); /* 2) reset the external phy: */ /* resetting the external PHY has been known to cause a hub supplying * power over Ethernet to kill the power. We don't want to kill * power to this computer, so we avoid resetting the phy. */ /* 3) reinit the phy fixup, it got lost during power down. 
*/ move_int_phy(dev, np->phy_addr_external); init_phy_fixup(dev); return 1; } static int switch_port_internal(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = ns_ioaddr(dev); int i; u32 cfg; u16 bmcr; cfg = readl(ioaddr + ChipConfig); if (!(cfg &CfgExtPhy)) return 0; if (netif_msg_link(np)) { printk(KERN_INFO "%s: switching to internal transceiver.\n", dev->name); } /* 1) switch back to internal phy: */ cfg = cfg & ~(CfgExtPhy | CfgPhyDis); writel(cfg, ioaddr + ChipConfig); readl(ioaddr + ChipConfig); udelay(1); /* 2) reset the internal phy: */ bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2)); writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2)); readl(ioaddr + ChipConfig); udelay(10); for (i=0;i<NATSEMI_HW_TIMEOUT;i++) { bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2)); if (!(bmcr & BMCR_RESET)) break; udelay(10); } if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) { printk(KERN_INFO "%s: phy reset did not complete in %d usec.\n", dev->name, i*10); } /* 3) reinit the phy fixup, it got lost during power down. */ init_phy_fixup(dev); return 1; } /* Scan for a PHY on the external mii bus. * There are two tricky points: * - Do not scan while the internal phy is enabled. The internal phy will * crash: e.g. reads from the DSPCFG register will return odd values and * the nasty random phy reset code will reset the nic every few seconds. * - The internal phy must be moved around, an external phy could * have the same address as the internal phy. */ static int find_mii(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); int tmp; int i; int did_switch; /* Switch to external phy */ did_switch = switch_port_external(dev); /* Scan the possible phy addresses: * * PHY address 0 means that the phy is in isolate mode. Not yet * supported due to lack of test hardware. User space should * handle it through ethtool. */ for (i = 1; i <= 31; i++) { move_int_phy(dev, i); tmp = miiport_read(dev, i, MII_BMSR); if (tmp != 0xffff && tmp != 0x0000) { /* found something! */ np->mii = (mdio_read(dev, MII_PHYSID1) << 16) + mdio_read(dev, MII_PHYSID2); if (netif_msg_probe(np)) { printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n", pci_name(np->pci_dev), np->mii, i); } break; } } /* And switch back to internal phy: */ if (did_switch) switch_port_internal(dev); return i; } /* CFG bits [13:16] [18:23] */ #define CFG_RESET_SAVE 0xfde000 /* WCSR bits [0:4] [9:10] */ #define WCSR_RESET_SAVE 0x61f /* RFCR bits [20] [22] [27:31] */ #define RFCR_RESET_SAVE 0xf8500000 static void natsemi_reset(struct net_device *dev) { int i; u32 cfg; u32 wcsr; u32 rfcr; u16 pmatch[3]; u16 sopass[3]; struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = ns_ioaddr(dev); /* * Resetting the chip causes some registers to be lost. * Natsemi suggests NOT reloading the EEPROM while live, so instead * we save the state that would have been loaded from EEPROM * on a normal power-up (see the spec EEPROM map). This assumes * whoever calls this will follow up with init_registers() eventually. 
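 *
 * For example (illustrative only, mirroring the callers elsewhere in this
 * file), ns_tx_timeout() recovers from a hung transmitter with
 *
 *	natsemi_reset(dev);
 *	reinit_ring(dev);
 *	init_registers(dev);
 *
 * where init_registers() reprograms the ring pointers, FIFO thresholds and
 * interrupt mask on top of the state restored here.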
*/ /* CFG */ cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE; /* WCSR */ wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE; /* RFCR */ rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE; /* PMATCH */ for (i = 0; i < 3; i++) { writel(i*2, ioaddr + RxFilterAddr); pmatch[i] = readw(ioaddr + RxFilterData); } /* SOPAS */ for (i = 0; i < 3; i++) { writel(0xa+(i*2), ioaddr + RxFilterAddr); sopass[i] = readw(ioaddr + RxFilterData); } /* now whack the chip */ writel(ChipReset, ioaddr + ChipCmd); for (i=0;i<NATSEMI_HW_TIMEOUT;i++) { if (!(readl(ioaddr + ChipCmd) & ChipReset)) break; udelay(5); } if (i==NATSEMI_HW_TIMEOUT) { printk(KERN_WARNING "%s: reset did not complete in %d usec.\n", dev->name, i*5); } else if (netif_msg_hw(np)) { printk(KERN_DEBUG "%s: reset completed in %d usec.\n", dev->name, i*5); } /* restore CFG */ cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE; /* turn on external phy if it was selected */ if (dev->if_port == PORT_TP) cfg &= ~(CfgExtPhy | CfgPhyDis); else cfg |= (CfgExtPhy | CfgPhyDis); writel(cfg, ioaddr + ChipConfig); /* restore WCSR */ wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE; writel(wcsr, ioaddr + WOLCmd); /* read RFCR */ rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE; /* restore PMATCH */ for (i = 0; i < 3; i++) { writel(i*2, ioaddr + RxFilterAddr); writew(pmatch[i], ioaddr + RxFilterData); } for (i = 0; i < 3; i++) { writel(0xa+(i*2), ioaddr + RxFilterAddr); writew(sopass[i], ioaddr + RxFilterData); } /* restore RFCR */ writel(rfcr, ioaddr + RxFilterAddr); } static void reset_rx(struct net_device *dev) { int i; struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = ns_ioaddr(dev); np->intr_status &= ~RxResetDone; writel(RxReset, ioaddr + ChipCmd); for (i=0;i<NATSEMI_HW_TIMEOUT;i++) { np->intr_status |= readl(ioaddr + IntrStatus); if (np->intr_status & RxResetDone) break; udelay(15); } if (i==NATSEMI_HW_TIMEOUT) { printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n", dev->name, i*15); } else if (netif_msg_hw(np)) { printk(KERN_WARNING "%s: RX reset took %d usec.\n", dev->name, i*15); } } static void natsemi_reload_eeprom(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = ns_ioaddr(dev); int i; writel(EepromReload, ioaddr + PCIBusCfg); for (i=0;i<NATSEMI_HW_TIMEOUT;i++) { udelay(50); if (!(readl(ioaddr + PCIBusCfg) & EepromReload)) break; } if (i==NATSEMI_HW_TIMEOUT) { printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n", pci_name(np->pci_dev), i*50); } else if (netif_msg_hw(np)) { printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n", pci_name(np->pci_dev), i*50); } } static void natsemi_stop_rxtx(struct net_device *dev) { void __iomem * ioaddr = ns_ioaddr(dev); struct netdev_private *np = netdev_priv(dev); int i; writel(RxOff | TxOff, ioaddr + ChipCmd); for(i=0;i< NATSEMI_HW_TIMEOUT;i++) { if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0) break; udelay(5); } if (i==NATSEMI_HW_TIMEOUT) { printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n", dev->name, i*5); } else if (netif_msg_hw(np)) { printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n", dev->name, i*5); } } static int netdev_open(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); const int irq = np->pci_dev->irq; int i; /* Reset the chip, just in case. 
*/ natsemi_reset(dev); i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); if (i) return i; if (netif_msg_ifup(np)) printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", dev->name, irq); i = alloc_ring(dev); if (i < 0) { free_irq(irq, dev); return i; } napi_enable(&np->napi); init_ring(dev); spin_lock_irq(&np->lock); init_registers(dev); /* now set the MAC address according to dev->dev_addr */ for (i = 0; i < 3; i++) { u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i]; writel(i*2, ioaddr + RxFilterAddr); writew(mac, ioaddr + RxFilterData); } writel(np->cur_rx_mode, ioaddr + RxFilterAddr); spin_unlock_irq(&np->lock); netif_start_queue(dev); if (netif_msg_ifup(np)) printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n", dev->name, (int)readl(ioaddr + ChipCmd)); /* Set the timer to check for link beat. */ timer_setup(&np->timer, netdev_timer, 0); np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ); add_timer(&np->timer); return 0; } static void do_cable_magic(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = ns_ioaddr(dev); if (dev->if_port != PORT_TP) return; if (np->srr >= SRR_DP83816_A5) return; /* * 100 MBit links with short cables can trip an issue with the chip. * The problem manifests as lots of CRC errors and/or flickering * activity LED while idle. This process is based on instructions * from engineers at National. */ if (readl(ioaddr + ChipConfig) & CfgSpeed100) { u16 data; writew(1, ioaddr + PGSEL); /* * coefficient visibility should already be enabled via * DSPCFG | 0x1000 */ data = readw(ioaddr + TSTDAT) & 0xff; /* * the value must be negative, and within certain values * (these values all come from National) */ if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) { np = netdev_priv(dev); /* the bug has been triggered - fix the coefficient */ writew(TSTDAT_FIXED, ioaddr + TSTDAT); /* lock the value */ data = readw(ioaddr + DSPCFG); np->dspcfg = data | DSPCFG_LOCK; writew(np->dspcfg, ioaddr + DSPCFG); } writew(0, ioaddr + PGSEL); } } static void undo_cable_magic(struct net_device *dev) { u16 data; struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); if (dev->if_port != PORT_TP) return; if (np->srr >= SRR_DP83816_A5) return; writew(1, ioaddr + PGSEL); /* make sure the lock bit is clear */ data = readw(ioaddr + DSPCFG); np->dspcfg = data & ~DSPCFG_LOCK; writew(np->dspcfg, ioaddr + DSPCFG); writew(0, ioaddr + PGSEL); } static void check_link(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); int duplex = np->duplex; u16 bmsr; /* If we are ignoring the PHY then don't try reading it. */ if (np->ignore_phy) goto propagate_state; /* The link status field is latched: it remains low after a temporary * link failure until it's read. We need the current link status, * thus read twice. 
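 * (The latched-low behaviour of the BMSR link-status bit is standard
 * IEEE 802.3 clause 22 behaviour rather than a quirk of this chip.)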
*/ mdio_read(dev, MII_BMSR); bmsr = mdio_read(dev, MII_BMSR); if (!(bmsr & BMSR_LSTATUS)) { if (netif_carrier_ok(dev)) { if (netif_msg_link(np)) printk(KERN_NOTICE "%s: link down.\n", dev->name); netif_carrier_off(dev); undo_cable_magic(dev); } return; } if (!netif_carrier_ok(dev)) { if (netif_msg_link(np)) printk(KERN_NOTICE "%s: link up.\n", dev->name); netif_carrier_on(dev); do_cable_magic(dev); } duplex = np->full_duplex; if (!duplex) { if (bmsr & BMSR_ANEGCOMPLETE) { int tmp = mii_nway_result( np->advertising & mdio_read(dev, MII_LPA)); if (tmp == LPA_100FULL || tmp == LPA_10FULL) duplex = 1; } else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX) duplex = 1; } propagate_state: /* if duplex is set then bit 28 must be set, too */ if (duplex ^ !!(np->rx_config & RxAcceptTx)) { if (netif_msg_link(np)) printk(KERN_INFO "%s: Setting %s-duplex based on negotiated " "link capability.\n", dev->name, duplex ? "full" : "half"); if (duplex) { np->rx_config |= RxAcceptTx; np->tx_config |= TxCarrierIgn | TxHeartIgn; } else { np->rx_config &= ~RxAcceptTx; np->tx_config &= ~(TxCarrierIgn | TxHeartIgn); } writel(np->tx_config, ioaddr + TxConfig); writel(np->rx_config, ioaddr + RxConfig); } } static void init_registers(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); init_phy_fixup(dev); /* clear any interrupts that are pending, such as wake events */ readl(ioaddr + IntrStatus); writel(np->ring_dma, ioaddr + RxRingPtr); writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc), ioaddr + TxRingPtr); /* Initialize other registers. * Configure the PCI bus bursts and FIFO thresholds. * Configure for standard, in-spec Ethernet. * Start with half-duplex. check_link will update * to the correct settings. */ /* DRTH: 2: start tx if 64 bytes are in the fifo * FLTH: 0x10: refill with next packet if 512 bytes are free * MXDMA: 0: up to 256 byte bursts. * MXDMA must be <= FLTH * ECRETRY=1 * ATP=1 */ np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 | TX_FLTH_VAL | TX_DRTH_VAL_START; writel(np->tx_config, ioaddr + TxConfig); /* DRTH 0x10: start copying to memory if 128 bytes are in the fifo * MXDMA 0: up to 256 byte bursts */ np->rx_config = RxMxdma_256 | RX_DRTH_VAL; /* if receive ring now has bigger buffers than normal, enable jumbo */ if (np->rx_buf_sz > NATSEMI_LONGPKT) np->rx_config |= RxAcceptLong; writel(np->rx_config, ioaddr + RxConfig); /* Disable PME: * The PME bit is initialized from the EEPROM contents. * PCI cards probably have PME disabled, but motherboard * implementations may have PME set to enable WakeOnLan. * With PME set the chip will scan incoming packets but * nothing will be written to memory. */ np->SavedClkRun = readl(ioaddr + ClkRun); writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun); if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) { printk(KERN_NOTICE "%s: Wake-up event %#08x\n", dev->name, readl(ioaddr + WOLCmd)); } check_link(dev); __set_rx_mode(dev); /* Enable interrupts by setting the interrupt mask. */ writel(DEFAULT_INTR, ioaddr + IntrMask); natsemi_irq_enable(dev); writel(RxOn | TxOn, ioaddr + ChipCmd); writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */ } /* * netdev_timer: * Purpose: * 1) check for link changes. Usually they are handled by the MII interrupt * but it doesn't hurt to check twice. * 2) check for sudden death of the NIC: * It seems that a reference set for this chip went out with incorrect info, * and there exist boards that aren't quite right. 
An unexpected voltage * drop can cause the PHY to get itself in a weird state (basically reset). * NOTE: this only seems to affect revC chips. The user can disable * this check via dspcfg_workaround sysfs option. * 3) check of death of the RX path due to OOM */ static void netdev_timer(struct timer_list *t) { struct netdev_private *np = from_timer(np, t, timer); struct net_device *dev = np->dev; void __iomem * ioaddr = ns_ioaddr(dev); int next_tick = NATSEMI_TIMER_FREQ; const int irq = np->pci_dev->irq; if (netif_msg_timer(np)) { /* DO NOT read the IntrStatus register, * a read clears any pending interrupts. */ printk(KERN_DEBUG "%s: Media selection timer tick.\n", dev->name); } if (dev->if_port == PORT_TP) { u16 dspcfg; spin_lock_irq(&np->lock); /* check for a nasty random phy-reset - use dspcfg as a flag */ writew(1, ioaddr+PGSEL); dspcfg = readw(ioaddr+DSPCFG); writew(0, ioaddr+PGSEL); if (np->dspcfg_workaround && dspcfg != np->dspcfg) { if (!netif_queue_stopped(dev)) { spin_unlock_irq(&np->lock); if (netif_msg_drv(np)) printk(KERN_NOTICE "%s: possible phy reset: " "re-initializing\n", dev->name); disable_irq(irq); spin_lock_irq(&np->lock); natsemi_stop_rxtx(dev); dump_ring(dev); reinit_ring(dev); init_registers(dev); spin_unlock_irq(&np->lock); enable_irq(irq); } else { /* hurry back */ next_tick = HZ; spin_unlock_irq(&np->lock); } } else { /* init_registers() calls check_link() for the above case */ check_link(dev); spin_unlock_irq(&np->lock); } } else { spin_lock_irq(&np->lock); check_link(dev); spin_unlock_irq(&np->lock); } if (np->oom) { disable_irq(irq); np->oom = 0; refill_rx(dev); enable_irq(irq); if (!np->oom) { writel(RxOn, ioaddr + ChipCmd); } else { next_tick = 1; } } if (next_tick > 1) mod_timer(&np->timer, round_jiffies(jiffies + next_tick)); else mod_timer(&np->timer, jiffies + next_tick); } static void dump_ring(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); if (netif_msg_pktdata(np)) { int i; printk(KERN_DEBUG " Tx ring at %p:\n", np->tx_ring); for (i = 0; i < TX_RING_SIZE; i++) { printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n", i, np->tx_ring[i].next_desc, np->tx_ring[i].cmd_status, np->tx_ring[i].addr); } printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) { printk(KERN_DEBUG " #%d desc. 
%#08x %#08x %#08x.\n", i, np->rx_ring[i].next_desc, np->rx_ring[i].cmd_status, np->rx_ring[i].addr); } } } static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); const int irq = np->pci_dev->irq; disable_irq(irq); spin_lock_irq(&np->lock); if (!np->hands_off) { if (netif_msg_tx_err(np)) printk(KERN_WARNING "%s: Transmit timed out, status %#08x," " resetting...\n", dev->name, readl(ioaddr + IntrStatus)); dump_ring(dev); natsemi_reset(dev); reinit_ring(dev); init_registers(dev); } else { printk(KERN_WARNING "%s: tx_timeout while in hands_off state?\n", dev->name); } spin_unlock_irq(&np->lock); enable_irq(irq); netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; netif_wake_queue(dev); } static int alloc_ring(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev, sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE), &np->ring_dma, GFP_KERNEL); if (!np->rx_ring) return -ENOMEM; np->tx_ring = &np->rx_ring[RX_RING_SIZE]; return 0; } static void refill_rx(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); /* Refill the Rx ring buffers. */ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) { struct sk_buff *skb; int entry = np->dirty_rx % RX_RING_SIZE; if (np->rx_skbuff[entry] == NULL) { unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING; skb = netdev_alloc_skb(dev, buflen); np->rx_skbuff[entry] = skb; if (skb == NULL) break; /* Better luck next round. */ np->rx_dma[entry] = dma_map_single(&np->pci_dev->dev, skb->data, buflen, DMA_FROM_DEVICE); if (dma_mapping_error(&np->pci_dev->dev, np->rx_dma[entry])) { dev_kfree_skb_any(skb); np->rx_skbuff[entry] = NULL; break; /* Better luck next round. */ } np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]); } np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz); } if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) { if (netif_msg_rx_err(np)) printk(KERN_WARNING "%s: going OOM.\n", dev->name); np->oom = 1; } } static void set_bufsize(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); if (dev->mtu <= ETH_DATA_LEN) np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS; else np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS; } /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ static void init_ring(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); int i; /* 1) TX ring */ np->dirty_tx = np->cur_tx = 0; for (i = 0; i < TX_RING_SIZE; i++) { np->tx_skbuff[i] = NULL; np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma +sizeof(struct netdev_desc) *((i+1)%TX_RING_SIZE+RX_RING_SIZE)); np->tx_ring[i].cmd_status = 0; } /* 2) RX ring */ np->dirty_rx = 0; np->cur_rx = RX_RING_SIZE; np->oom = 0; set_bufsize(dev); np->rx_head_desc = &np->rx_ring[0]; /* Please be careful before changing this loop - at least gcc-2.95.1 * miscompiles it otherwise. */ /* Initialize all Rx descriptors. 
*/ for (i = 0; i < RX_RING_SIZE; i++) { np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma +sizeof(struct netdev_desc) *((i+1)%RX_RING_SIZE)); np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn); np->rx_skbuff[i] = NULL; } refill_rx(dev); dump_ring(dev); } static void drain_tx(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); int i; for (i = 0; i < TX_RING_SIZE; i++) { if (np->tx_skbuff[i]) { dma_unmap_single(&np->pci_dev->dev, np->tx_dma[i], np->tx_skbuff[i]->len, DMA_TO_DEVICE); dev_kfree_skb(np->tx_skbuff[i]); dev->stats.tx_dropped++; } np->tx_skbuff[i] = NULL; } } static void drain_rx(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); unsigned int buflen = np->rx_buf_sz; int i; /* Free all the skbuffs in the Rx queue. */ for (i = 0; i < RX_RING_SIZE; i++) { np->rx_ring[i].cmd_status = 0; np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ if (np->rx_skbuff[i]) { dma_unmap_single(&np->pci_dev->dev, np->rx_dma[i], buflen + NATSEMI_PADDING, DMA_FROM_DEVICE); dev_kfree_skb(np->rx_skbuff[i]); } np->rx_skbuff[i] = NULL; } } static void drain_ring(struct net_device *dev) { drain_rx(dev); drain_tx(dev); } static void free_ring(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); dma_free_coherent(&np->pci_dev->dev, sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE), np->rx_ring, np->ring_dma); } static void reinit_rx(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); int i; /* RX Ring */ np->dirty_rx = 0; np->cur_rx = RX_RING_SIZE; np->rx_head_desc = &np->rx_ring[0]; /* Initialize all Rx descriptors. */ for (i = 0; i < RX_RING_SIZE; i++) np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn); refill_rx(dev); } static void reinit_ring(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); int i; /* drain TX ring */ drain_tx(dev); np->dirty_tx = np->cur_tx = 0; for (i=0;i<TX_RING_SIZE;i++) np->tx_ring[i].cmd_status = 0; reinit_rx(dev); } static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); unsigned entry; unsigned long flags; /* Note: Ordering is important here, set the field with the "ownership" bit last, and only then increment cur_tx. */ /* Calculate the next Tx descriptor entry. */ entry = np->cur_tx % TX_RING_SIZE; np->tx_skbuff[entry] = skb; np->tx_dma[entry] = dma_map_single(&np->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(&np->pci_dev->dev, np->tx_dma[entry])) { np->tx_skbuff[entry] = NULL; dev_kfree_skb_irq(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; } np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]); spin_lock_irqsave(&np->lock, flags); if (!np->hands_off) { np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len); /* StrongARM: Explicitly cache flush np->tx_ring and * skb->data,skb->len. */ wmb(); np->cur_tx++; if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) { netdev_tx_done(dev); if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) netif_stop_queue(dev); } /* Wake the potentially-idle transmit channel. 
*/ writel(TxOn, ioaddr + ChipCmd); } else { dev_kfree_skb_irq(skb); dev->stats.tx_dropped++; } spin_unlock_irqrestore(&np->lock, flags); if (netif_msg_tx_queued(np)) { printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", dev->name, np->cur_tx, entry); } return NETDEV_TX_OK; } static void netdev_tx_done(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { int entry = np->dirty_tx % TX_RING_SIZE; if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn)) break; if (netif_msg_tx_done(np)) printk(KERN_DEBUG "%s: tx frame #%d finished, status %#08x.\n", dev->name, np->dirty_tx, le32_to_cpu(np->tx_ring[entry].cmd_status)); if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) { dev->stats.tx_packets++; dev->stats.tx_bytes += np->tx_skbuff[entry]->len; } else { /* Various Tx errors */ int tx_status = le32_to_cpu(np->tx_ring[entry].cmd_status); if (tx_status & (DescTxAbort|DescTxExcColl)) dev->stats.tx_aborted_errors++; if (tx_status & DescTxFIFO) dev->stats.tx_fifo_errors++; if (tx_status & DescTxCarrier) dev->stats.tx_carrier_errors++; if (tx_status & DescTxOOWCol) dev->stats.tx_window_errors++; dev->stats.tx_errors++; } dma_unmap_single(&np->pci_dev->dev, np->tx_dma[entry], np->tx_skbuff[entry]->len, DMA_TO_DEVICE); /* Free the original skb. */ dev_consume_skb_irq(np->tx_skbuff[entry]); np->tx_skbuff[entry] = NULL; } if (netif_queue_stopped(dev) && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { /* The ring is no longer full, wake queue. */ netif_wake_queue(dev); } } /* The interrupt handler doesn't actually handle interrupts itself, it * schedules a NAPI poll if there is anything to do. */ static irqreturn_t intr_handler(int irq, void *dev_instance) { struct net_device *dev = dev_instance; struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); /* Reading IntrStatus automatically acknowledges so don't do * that while interrupts are disabled, (for example, while a * poll is scheduled). */ if (np->hands_off || !readl(ioaddr + IntrEnable)) return IRQ_NONE; np->intr_status = readl(ioaddr + IntrStatus); if (!np->intr_status) return IRQ_NONE; if (netif_msg_intr(np)) printk(KERN_DEBUG "%s: Interrupt, status %#08x, mask %#08x.\n", dev->name, np->intr_status, readl(ioaddr + IntrMask)); prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]); if (napi_schedule_prep(&np->napi)) { /* Disable interrupts and register for poll */ natsemi_irq_disable(dev); __napi_schedule(&np->napi); } else printk(KERN_WARNING "%s: Ignoring interrupt, status %#08x, mask %#08x.\n", dev->name, np->intr_status, readl(ioaddr + IntrMask)); return IRQ_HANDLED; } /* This is the NAPI poll routine. As well as the standard RX handling * it also handles all other interrupts that the chip might raise. */ static int natsemi_poll(struct napi_struct *napi, int budget) { struct netdev_private *np = container_of(napi, struct netdev_private, napi); struct net_device *dev = np->dev; void __iomem * ioaddr = ns_ioaddr(dev); int work_done = 0; do { if (netif_msg_intr(np)) printk(KERN_DEBUG "%s: Poll, status %#08x, mask %#08x.\n", dev->name, np->intr_status, readl(ioaddr + IntrMask)); /* netdev_rx() may read IntrStatus again if the RX state * machine falls over so do it first. 
*/ if (np->intr_status & (IntrRxDone | IntrRxIntr | RxStatusFIFOOver | IntrRxErr | IntrRxOverrun)) { netdev_rx(dev, &work_done, budget); } if (np->intr_status & (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) { spin_lock(&np->lock); netdev_tx_done(dev); spin_unlock(&np->lock); } /* Abnormal error summary/uncommon events handlers. */ if (np->intr_status & IntrAbnormalSummary) netdev_error(dev, np->intr_status); if (work_done >= budget) return work_done; np->intr_status = readl(ioaddr + IntrStatus); } while (np->intr_status); napi_complete_done(napi, work_done); /* Reenable interrupts providing nothing is trying to shut * the chip down. */ spin_lock(&np->lock); if (!np->hands_off) natsemi_irq_enable(dev); spin_unlock(&np->lock); return work_done; } /* This routine is logically part of the interrupt handler, but separated for clarity and better register allocation. */ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do) { struct netdev_private *np = netdev_priv(dev); int entry = np->cur_rx % RX_RING_SIZE; int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx; s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status); unsigned int buflen = np->rx_buf_sz; void __iomem * ioaddr = ns_ioaddr(dev); /* If the driver owns the next entry it's a new packet. Send it up. */ while (desc_status < 0) { /* e.g. & DescOwn */ int pkt_len; if (netif_msg_rx_status(np)) printk(KERN_DEBUG " netdev_rx() entry %d status was %#08x.\n", entry, desc_status); if (--boguscnt < 0) break; if (*work_done >= work_to_do) break; (*work_done)++; pkt_len = (desc_status & DescSizeMask) - 4; if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){ if (desc_status & DescMore) { unsigned long flags; if (netif_msg_rx_err(np)) printk(KERN_WARNING "%s: Oversized(?) Ethernet " "frame spanned multiple " "buffers, entry %#08x " "status %#08x.\n", dev->name, np->cur_rx, desc_status); dev->stats.rx_length_errors++; /* The RX state machine has probably * locked up beneath us. Follow the * reset procedure documented in * AN-1287. */ spin_lock_irqsave(&np->lock, flags); reset_rx(dev); reinit_rx(dev); writel(np->ring_dma, ioaddr + RxRingPtr); check_link(dev); spin_unlock_irqrestore(&np->lock, flags); /* We'll enable RX on exit from this * function. */ break; } else { /* There was an error. */ dev->stats.rx_errors++; if (desc_status & (DescRxAbort|DescRxOver)) dev->stats.rx_over_errors++; if (desc_status & (DescRxLong|DescRxRunt)) dev->stats.rx_length_errors++; if (desc_status & (DescRxInvalid|DescRxAlign)) dev->stats.rx_frame_errors++; if (desc_status & DescRxCRC) dev->stats.rx_crc_errors++; } } else if (pkt_len > np->rx_buf_sz) { /* if this is the tail of a double buffer * packet, we've already counted the error * on the first part. Ignore the second half. */ } else { struct sk_buff *skb; /* Omit CRC size. */ /* Check if the packet is long enough to accept * without copying to a minimally-sized skbuff. 
*/ if (pkt_len < rx_copybreak && (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) { /* 16 byte align the IP header */ skb_reserve(skb, RX_OFFSET); dma_sync_single_for_cpu(&np->pci_dev->dev, np->rx_dma[entry], buflen, DMA_FROM_DEVICE); skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); skb_put(skb, pkt_len); dma_sync_single_for_device(&np->pci_dev->dev, np->rx_dma[entry], buflen, DMA_FROM_DEVICE); } else { dma_unmap_single(&np->pci_dev->dev, np->rx_dma[entry], buflen + NATSEMI_PADDING, DMA_FROM_DEVICE); skb_put(skb = np->rx_skbuff[entry], pkt_len); np->rx_skbuff[entry] = NULL; } skb->protocol = eth_type_trans(skb, dev); netif_receive_skb(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } entry = (++np->cur_rx) % RX_RING_SIZE; np->rx_head_desc = &np->rx_ring[entry]; desc_status = le32_to_cpu(np->rx_head_desc->cmd_status); } refill_rx(dev); /* Restart Rx engine if stopped. */ if (np->oom) mod_timer(&np->timer, jiffies + 1); else writel(RxOn, ioaddr + ChipCmd); } static void netdev_error(struct net_device *dev, int intr_status) { struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); spin_lock(&np->lock); if (intr_status & LinkChange) { u16 lpa = mdio_read(dev, MII_LPA); if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE && netif_msg_link(np)) { printk(KERN_INFO "%s: Autonegotiation advertising" " %#04x partner %#04x.\n", dev->name, np->advertising, lpa); } /* read MII int status to clear the flag */ readw(ioaddr + MIntrStatus); check_link(dev); } if (intr_status & StatsMax) { __get_stats(dev); } if (intr_status & IntrTxUnderrun) { if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) { np->tx_config += TX_DRTH_VAL_INC; if (netif_msg_tx_err(np)) printk(KERN_NOTICE "%s: increased tx threshold, txcfg %#08x.\n", dev->name, np->tx_config); } else { if (netif_msg_tx_err(np)) printk(KERN_NOTICE "%s: tx underrun with maximum tx threshold, txcfg %#08x.\n", dev->name, np->tx_config); } writel(np->tx_config, ioaddr + TxConfig); } if (intr_status & WOLPkt && netif_msg_wol(np)) { int wol_status = readl(ioaddr + WOLCmd); printk(KERN_NOTICE "%s: Link wake-up event %#08x\n", dev->name, wol_status); } if (intr_status & RxStatusFIFOOver) { if (netif_msg_rx_err(np) && netif_msg_intr(np)) { printk(KERN_NOTICE "%s: Rx status FIFO overrun\n", dev->name); } dev->stats.rx_fifo_errors++; dev->stats.rx_errors++; } /* Hmmmmm, it's not clear how to recover from PCI faults. */ if (intr_status & IntrPCIErr) { printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name, intr_status & IntrPCIErr); dev->stats.tx_fifo_errors++; dev->stats.tx_errors++; dev->stats.rx_fifo_errors++; dev->stats.rx_errors++; } spin_unlock(&np->lock); } static void __get_stats(struct net_device *dev) { void __iomem * ioaddr = ns_ioaddr(dev); /* The chip only need report frame silently dropped. */ dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs); dev->stats.rx_missed_errors += readl(ioaddr + RxMissed); } static struct net_device_stats *get_stats(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); /* The chip only need report frame silently dropped. 
*/ spin_lock_irq(&np->lock); if (netif_running(dev) && !np->hands_off) __get_stats(dev); spin_unlock_irq(&np->lock); return &dev->stats; } #ifdef CONFIG_NET_POLL_CONTROLLER static void natsemi_poll_controller(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); const int irq = np->pci_dev->irq; disable_irq(irq); intr_handler(irq, dev); enable_irq(irq); } #endif #define HASH_TABLE 0x200 static void __set_rx_mode(struct net_device *dev) { void __iomem * ioaddr = ns_ioaddr(dev); struct netdev_private *np = netdev_priv(dev); u8 mc_filter[64]; /* Multicast hash filter */ u32 rx_mode; if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ rx_mode = RxFilterEnable | AcceptBroadcast | AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys; } else if ((netdev_mc_count(dev) > multicast_filter_limit) || (dev->flags & IFF_ALLMULTI)) { rx_mode = RxFilterEnable | AcceptBroadcast | AcceptAllMulticast | AcceptMyPhys; } else { struct netdev_hw_addr *ha; int i; memset(mc_filter, 0, sizeof(mc_filter)); netdev_for_each_mc_addr(ha, dev) { int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff; mc_filter[b/8] |= (1 << (b & 0x07)); } rx_mode = RxFilterEnable | AcceptBroadcast | AcceptMulticast | AcceptMyPhys; for (i = 0; i < 64; i += 2) { writel(HASH_TABLE + i, ioaddr + RxFilterAddr); writel((mc_filter[i + 1] << 8) + mc_filter[i], ioaddr + RxFilterData); } } writel(rx_mode, ioaddr + RxFilterAddr); np->cur_rx_mode = rx_mode; } static int natsemi_change_mtu(struct net_device *dev, int new_mtu) { dev->mtu = new_mtu; /* synchronized against open : rtnl_lock() held by caller */ if (netif_running(dev)) { struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); const int irq = np->pci_dev->irq; disable_irq(irq); spin_lock(&np->lock); /* stop engines */ natsemi_stop_rxtx(dev); /* drain rx queue */ drain_rx(dev); /* change buffers */ set_bufsize(dev); reinit_rx(dev); writel(np->ring_dma, ioaddr + RxRingPtr); /* restart engines */ writel(RxOn | TxOn, ioaddr + ChipCmd); spin_unlock(&np->lock); enable_irq(irq); } return 0; } static void set_rx_mode(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); spin_lock_irq(&np->lock); if (!np->hands_off) __set_rx_mode(dev); spin_unlock_irq(&np->lock); } static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct netdev_private *np = netdev_priv(dev); strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } static int get_regs_len(struct net_device *dev) { return NATSEMI_REGS_SIZE; } static int get_eeprom_len(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); return np->eeprom_size; } static int get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *ecmd) { struct netdev_private *np = netdev_priv(dev); spin_lock_irq(&np->lock); netdev_get_ecmd(dev, ecmd); spin_unlock_irq(&np->lock); return 0; } static int set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *ecmd) { struct netdev_private *np = netdev_priv(dev); int res; spin_lock_irq(&np->lock); res = netdev_set_ecmd(dev, ecmd); spin_unlock_irq(&np->lock); return res; } static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct netdev_private *np = netdev_priv(dev); spin_lock_irq(&np->lock); netdev_get_wol(dev, &wol->supported, &wol->wolopts); netdev_get_sopass(dev, wol->sopass); spin_unlock_irq(&np->lock); } static int 
set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct netdev_private *np = netdev_priv(dev); int res; spin_lock_irq(&np->lock); netdev_set_wol(dev, wol->wolopts); res = netdev_set_sopass(dev, wol->sopass); spin_unlock_irq(&np->lock); return res; } static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) { struct netdev_private *np = netdev_priv(dev); regs->version = NATSEMI_REGS_VER; spin_lock_irq(&np->lock); netdev_get_regs(dev, buf); spin_unlock_irq(&np->lock); } static u32 get_msglevel(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); return np->msg_enable; } static void set_msglevel(struct net_device *dev, u32 val) { struct netdev_private *np = netdev_priv(dev); np->msg_enable = val; } static int nway_reset(struct net_device *dev) { int tmp; int r = -EINVAL; /* if autoneg is off, it's an error */ tmp = mdio_read(dev, MII_BMCR); if (tmp & BMCR_ANENABLE) { tmp |= (BMCR_ANRESTART); mdio_write(dev, MII_BMCR, tmp); r = 0; } return r; } static u32 get_link(struct net_device *dev) { /* LSTATUS is latched low until a read - so read twice */ mdio_read(dev, MII_BMSR); return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 1:0; } static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { struct netdev_private *np = netdev_priv(dev); u8 *eebuf; int res; eebuf = kmalloc(np->eeprom_size, GFP_KERNEL); if (!eebuf) return -ENOMEM; eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16); spin_lock_irq(&np->lock); res = netdev_get_eeprom(dev, eebuf); spin_unlock_irq(&np->lock); if (!res) memcpy(data, eebuf+eeprom->offset, eeprom->len); kfree(eebuf); return res; } static const struct ethtool_ops ethtool_ops = { .get_drvinfo = get_drvinfo, .get_regs_len = get_regs_len, .get_eeprom_len = get_eeprom_len, .get_wol = get_wol, .set_wol = set_wol, .get_regs = get_regs, .get_msglevel = get_msglevel, .set_msglevel = set_msglevel, .nway_reset = nway_reset, .get_link = get_link, .get_eeprom = get_eeprom, .get_link_ksettings = get_link_ksettings, .set_link_ksettings = set_link_ksettings, }; static int netdev_set_wol(struct net_device *dev, u32 newval) { struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary; /* translate to bitmasks this chip understands */ if (newval & WAKE_PHY) data |= WakePhy; if (newval & WAKE_UCAST) data |= WakeUnicast; if (newval & WAKE_MCAST) data |= WakeMulticast; if (newval & WAKE_BCAST) data |= WakeBroadcast; if (newval & WAKE_ARP) data |= WakeArp; if (newval & WAKE_MAGIC) data |= WakeMagic; if (np->srr >= SRR_DP83815_D) { if (newval & WAKE_MAGICSECURE) { data |= WakeMagicSecure; } } writel(data, ioaddr + WOLCmd); return 0; } static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur) { struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); u32 regval = readl(ioaddr + WOLCmd); *supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_ARP | WAKE_MAGIC); if (np->srr >= SRR_DP83815_D) { /* SOPASS works on revD and higher */ *supported |= WAKE_MAGICSECURE; } *cur = 0; /* translate from chip bitmasks */ if (regval & WakePhy) *cur |= WAKE_PHY; if (regval & WakeUnicast) *cur |= WAKE_UCAST; if (regval & WakeMulticast) *cur |= WAKE_MCAST; if (regval & WakeBroadcast) *cur |= WAKE_BCAST; if (regval & WakeArp) *cur |= WAKE_ARP; if (regval & WakeMagic) *cur |= WAKE_MAGIC; if (regval & WakeMagicSecure) { /* this can be on in revC, but it's broken */ *cur |= 
WAKE_MAGICSECURE; } return 0; } static int netdev_set_sopass(struct net_device *dev, u8 *newval) { struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); u16 *sval = (u16 *)newval; u32 addr; if (np->srr < SRR_DP83815_D) { return 0; } /* enable writing to these registers by disabling the RX filter */ addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask; addr &= ~RxFilterEnable; writel(addr, ioaddr + RxFilterAddr); /* write the three words to (undocumented) RFCR vals 0xa, 0xc, 0xe */ writel(addr | 0xa, ioaddr + RxFilterAddr); writew(sval[0], ioaddr + RxFilterData); writel(addr | 0xc, ioaddr + RxFilterAddr); writew(sval[1], ioaddr + RxFilterData); writel(addr | 0xe, ioaddr + RxFilterAddr); writew(sval[2], ioaddr + RxFilterData); /* re-enable the RX filter */ writel(addr | RxFilterEnable, ioaddr + RxFilterAddr); return 0; } static int netdev_get_sopass(struct net_device *dev, u8 *data) { struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); u16 *sval = (u16 *)data; u32 addr; if (np->srr < SRR_DP83815_D) { sval[0] = sval[1] = sval[2] = 0; return 0; } /* read the three words from (undocumented) RFCR vals 0xa, 0xc, 0xe */ addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask; writel(addr | 0xa, ioaddr + RxFilterAddr); sval[0] = readw(ioaddr + RxFilterData); writel(addr | 0xc, ioaddr + RxFilterAddr); sval[1] = readw(ioaddr + RxFilterData); writel(addr | 0xe, ioaddr + RxFilterAddr); sval[2] = readw(ioaddr + RxFilterData); writel(addr, ioaddr + RxFilterAddr); return 0; } static int netdev_get_ecmd(struct net_device *dev, struct ethtool_link_ksettings *ecmd) { struct netdev_private *np = netdev_priv(dev); u32 supported, advertising; u32 tmp; ecmd->base.port = dev->if_port; ecmd->base.speed = np->speed; ecmd->base.duplex = np->duplex; ecmd->base.autoneg = np->autoneg; advertising = 0; if (np->advertising & ADVERTISE_10HALF) advertising |= ADVERTISED_10baseT_Half; if (np->advertising & ADVERTISE_10FULL) advertising |= ADVERTISED_10baseT_Full; if (np->advertising & ADVERTISE_100HALF) advertising |= ADVERTISED_100baseT_Half; if (np->advertising & ADVERTISE_100FULL) advertising |= ADVERTISED_100baseT_Full; supported = (SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE); ecmd->base.phy_address = np->phy_addr_external; /* * We intentionally report the phy address of the external * phy, even if the internal phy is used. This is necessary * to work around a deficiency of the ethtool interface: * It's only possible to query the settings of the active * port. Therefore * # ethtool -s ethX port mii * actually sends an ioctl to switch to port mii with the * settings that are used for the current active port. * If we would report a different phy address in this * command, then * # ethtool -s ethX port tp;ethtool -s ethX port mii * would unintentionally change the phy address. * * Fortunately the phy address doesn't matter with the * internal phy... 
*/ /* set information based on active port type */ switch (ecmd->base.port) { default: case PORT_TP: advertising |= ADVERTISED_TP; break; case PORT_MII: advertising |= ADVERTISED_MII; break; case PORT_FIBRE: advertising |= ADVERTISED_FIBRE; break; } /* if autonegotiation is on, try to return the active speed/duplex */ if (ecmd->base.autoneg == AUTONEG_ENABLE) { advertising |= ADVERTISED_Autoneg; tmp = mii_nway_result( np->advertising & mdio_read(dev, MII_LPA)); if (tmp == LPA_100FULL || tmp == LPA_100HALF) ecmd->base.speed = SPEED_100; else ecmd->base.speed = SPEED_10; if (tmp == LPA_100FULL || tmp == LPA_10FULL) ecmd->base.duplex = DUPLEX_FULL; else ecmd->base.duplex = DUPLEX_HALF; } /* ignore maxtxpkt, maxrxpkt for now */ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising, advertising); return 0; } static int netdev_set_ecmd(struct net_device *dev, const struct ethtool_link_ksettings *ecmd) { struct netdev_private *np = netdev_priv(dev); u32 advertising; ethtool_convert_link_mode_to_legacy_u32(&advertising, ecmd->link_modes.advertising); if (ecmd->base.port != PORT_TP && ecmd->base.port != PORT_MII && ecmd->base.port != PORT_FIBRE) return -EINVAL; if (ecmd->base.autoneg == AUTONEG_ENABLE) { if ((advertising & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full)) == 0) { return -EINVAL; } } else if (ecmd->base.autoneg == AUTONEG_DISABLE) { u32 speed = ecmd->base.speed; if (speed != SPEED_10 && speed != SPEED_100) return -EINVAL; if (ecmd->base.duplex != DUPLEX_HALF && ecmd->base.duplex != DUPLEX_FULL) return -EINVAL; } else { return -EINVAL; } /* * If we're ignoring the PHY then autoneg and the internal * transceiver are really not going to work so don't let the * user select them. */ if (np->ignore_phy && (ecmd->base.autoneg == AUTONEG_ENABLE || ecmd->base.port == PORT_TP)) return -EINVAL; /* * maxtxpkt, maxrxpkt: ignored for now. * * transceiver: * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are always * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and * selects based on ecmd->port. * * Actually PORT_FIBRE is nearly identical to PORT_MII: it's for fibre * phys that are connected to the mii bus. It's used to apply fibre * specific updates. */ /* WHEW! now lets bang some bits */ /* save the parms */ dev->if_port = ecmd->base.port; np->autoneg = ecmd->base.autoneg; np->phy_addr_external = ecmd->base.phy_address & PhyAddrMask; if (np->autoneg == AUTONEG_ENABLE) { /* advertise only what has been requested */ np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (advertising & ADVERTISED_10baseT_Half) np->advertising |= ADVERTISE_10HALF; if (advertising & ADVERTISED_10baseT_Full) np->advertising |= ADVERTISE_10FULL; if (advertising & ADVERTISED_100baseT_Half) np->advertising |= ADVERTISE_100HALF; if (advertising & ADVERTISED_100baseT_Full) np->advertising |= ADVERTISE_100FULL; } else { np->speed = ecmd->base.speed; np->duplex = ecmd->base.duplex; /* user overriding the initial full duplex parm? 
*/ if (np->duplex == DUPLEX_HALF) np->full_duplex = 0; } /* get the right phy enabled */ if (ecmd->base.port == PORT_TP) switch_port_internal(dev); else switch_port_external(dev); /* set parms and see how this affected our link status */ init_phy_fixup(dev); check_link(dev); return 0; } static int netdev_get_regs(struct net_device *dev, u8 *buf) { int i; int j; u32 rfcr; u32 *rbuf = (u32 *)buf; void __iomem * ioaddr = ns_ioaddr(dev); /* read non-mii page 0 of registers */ for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) { rbuf[i] = readl(ioaddr + i*4); } /* read current mii registers */ for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++) rbuf[i] = mdio_read(dev, i & 0x1f); /* read only the 'magic' registers from page 1 */ writew(1, ioaddr + PGSEL); rbuf[i++] = readw(ioaddr + PMDCSR); rbuf[i++] = readw(ioaddr + TSTDAT); rbuf[i++] = readw(ioaddr + DSPCFG); rbuf[i++] = readw(ioaddr + SDCFG); writew(0, ioaddr + PGSEL); /* read RFCR indexed registers */ rfcr = readl(ioaddr + RxFilterAddr); for (j = 0; j < NATSEMI_RFDR_NREGS; j++) { writel(j*2, ioaddr + RxFilterAddr); rbuf[i++] = readw(ioaddr + RxFilterData); } writel(rfcr, ioaddr + RxFilterAddr); /* the interrupt status is clear-on-read - see if we missed any */ if (rbuf[4] & rbuf[5]) { printk(KERN_WARNING "%s: shoot, we dropped an interrupt (%#08x)\n", dev->name, rbuf[4] & rbuf[5]); } return 0; } #define SWAP_BITS(x) ( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \ | (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9) \ | (((x) & 0x0010) << 7) | (((x) & 0x0020) << 5) \ | (((x) & 0x0040) << 3) | (((x) & 0x0080) << 1) \ | (((x) & 0x0100) >> 1) | (((x) & 0x0200) >> 3) \ | (((x) & 0x0400) >> 5) | (((x) & 0x0800) >> 7) \ | (((x) & 0x1000) >> 9) | (((x) & 0x2000) >> 11) \ | (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) ) static int netdev_get_eeprom(struct net_device *dev, u8 *buf) { int i; u16 *ebuf = (u16 *)buf; void __iomem * ioaddr = ns_ioaddr(dev); struct netdev_private *np = netdev_priv(dev); /* eeprom_read reads 16 bits, and indexes by 16 bits */ for (i = 0; i < np->eeprom_size/2; i++) { ebuf[i] = eeprom_read(ioaddr, i); /* The EEPROM itself stores data bit-swapped, but eeprom_read * reads it back "sanely". So we swap it back here in order to * present it to userland as it is stored. */ ebuf[i] = SWAP_BITS(ebuf[i]); } return 0; } static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct mii_ioctl_data *data = if_mii(rq); struct netdev_private *np = netdev_priv(dev); switch(cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = np->phy_addr_external; fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ /* The phy_id is not enough to uniquely identify * the intended target. Therefore the command is sent to * the given mii on the current port. */ if (dev->if_port == PORT_TP) { if ((data->phy_id & 0x1f) == np->phy_addr_external) data->val_out = mdio_read(dev, data->reg_num & 0x1f); else data->val_out = 0; } else { move_int_phy(dev, data->phy_id & 0x1f); data->val_out = miiport_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f); } return 0; case SIOCSMIIREG: /* Write MII PHY register. 
*/ if (dev->if_port == PORT_TP) { if ((data->phy_id & 0x1f) == np->phy_addr_external) { if ((data->reg_num & 0x1f) == MII_ADVERTISE) np->advertising = data->val_in; mdio_write(dev, data->reg_num & 0x1f, data->val_in); } } else { if ((data->phy_id & 0x1f) == np->phy_addr_external) { if ((data->reg_num & 0x1f) == MII_ADVERTISE) np->advertising = data->val_in; } move_int_phy(dev, data->phy_id & 0x1f); miiport_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); } return 0; default: return -EOPNOTSUPP; } } static void enable_wol_mode(struct net_device *dev, int enable_intr) { void __iomem * ioaddr = ns_ioaddr(dev); struct netdev_private *np = netdev_priv(dev); if (netif_msg_wol(np)) printk(KERN_INFO "%s: remaining active for wake-on-lan\n", dev->name); /* For WOL we must restart the rx process in silent mode. * Write NULL to the RxRingPtr. Only possible if * rx process is stopped */ writel(0, ioaddr + RxRingPtr); /* read WoL status to clear */ readl(ioaddr + WOLCmd); /* PME on, clear status */ writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun); /* and restart the rx process */ writel(RxOn, ioaddr + ChipCmd); if (enable_intr) { /* enable the WOL interrupt. * Could be used to send a netlink message. */ writel(WOLPkt | LinkChange, ioaddr + IntrMask); natsemi_irq_enable(dev); } } static int netdev_close(struct net_device *dev) { void __iomem * ioaddr = ns_ioaddr(dev); struct netdev_private *np = netdev_priv(dev); const int irq = np->pci_dev->irq; if (netif_msg_ifdown(np)) printk(KERN_DEBUG "%s: Shutting down ethercard, status was %#04x.\n", dev->name, (int)readl(ioaddr + ChipCmd)); if (netif_msg_pktdata(np)) printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); napi_disable(&np->napi); /* * FIXME: what if someone tries to close a device * that is suspended? * Should we reenable the nic to switch to * the final WOL settings? */ del_timer_sync(&np->timer); disable_irq(irq); spin_lock_irq(&np->lock); natsemi_irq_disable(dev); np->hands_off = 1; spin_unlock_irq(&np->lock); enable_irq(irq); free_irq(irq, dev); /* Interrupt disabled, interrupt handler released, * queue stopped, timer deleted, rtnl_lock held * All async codepaths that access the driver are disabled. */ spin_lock_irq(&np->lock); np->hands_off = 0; readl(ioaddr + IntrMask); readw(ioaddr + MIntrStatus); /* Freeze Stats */ writel(StatsFreeze, ioaddr + StatsCtrl); /* Stop the chip's Tx and Rx processes. */ natsemi_stop_rxtx(dev); __get_stats(dev); spin_unlock_irq(&np->lock); /* clear the carrier last - an interrupt could reenable it otherwise */ netif_carrier_off(dev); netif_stop_queue(dev); dump_ring(dev); drain_ring(dev); free_ring(dev); { u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary; if (wol) { /* restart the NIC in WOL mode. * The nic must be stopped for this. */ enable_wol_mode(dev, 0); } else { /* Restore PME enable bit unmolested */ writel(np->SavedClkRun, ioaddr + ClkRun); } } return 0; } static void natsemi_remove1(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); void __iomem * ioaddr = ns_ioaddr(dev); NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround); unregister_netdev (dev); iounmap(ioaddr); free_netdev (dev); } /* * The ns83815 chip doesn't have explicit RxStop bits. 
* Kicking the Rx or Tx process for a new packet reenables the Rx process * of the nic, thus this function must be very careful: * * suspend/resume synchronization: * entry points: * netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler, * start_tx, ns_tx_timeout * * No function accesses the hardware without checking np->hands_off. * the check occurs under spin_lock_irq(&np->lock); * exceptions: * * netdev_ioctl: noncritical access. * * netdev_open: cannot happen due to the device_detach * * netdev_close: doesn't hurt. * * netdev_timer: timer stopped by natsemi_suspend. * * intr_handler: doesn't acquire the spinlock. suspend calls * disable_irq() to enforce synchronization. * * natsemi_poll: checks before reenabling interrupts. suspend * sets hands_off, disables interrupts and then waits with * napi_disable(). * * Interrupts must be disabled, otherwise hands_off can cause irq storms. */ static int __maybe_unused natsemi_suspend(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); rtnl_lock(); if (netif_running (dev)) { const int irq = np->pci_dev->irq; del_timer_sync(&np->timer); disable_irq(irq); spin_lock_irq(&np->lock); natsemi_irq_disable(dev); np->hands_off = 1; natsemi_stop_rxtx(dev); netif_stop_queue(dev); spin_unlock_irq(&np->lock); enable_irq(irq); napi_disable(&np->napi); /* Update the error counts. */ __get_stats(dev); /* pci_power_off(pdev, -1); */ drain_ring(dev); { u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary; /* Restore PME enable bit */ if (wol) { /* restart the NIC in WOL mode. * The nic must be stopped for this. * FIXME: use the WOL interrupt */ enable_wol_mode(dev, 0); } else { /* Restore PME enable bit unmolested */ writel(np->SavedClkRun, ioaddr + ClkRun); } } } netif_device_detach(dev); rtnl_unlock(); return 0; } static int __maybe_unused natsemi_resume(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); struct netdev_private *np = netdev_priv(dev); rtnl_lock(); if (netif_device_present(dev)) goto out; if (netif_running(dev)) { const int irq = np->pci_dev->irq; BUG_ON(!np->hands_off); /* pci_power_on(pdev); */ napi_enable(&np->napi); natsemi_reset(dev); init_ring(dev); disable_irq(irq); spin_lock_irq(&np->lock); np->hands_off = 0; init_registers(dev); netif_device_attach(dev); spin_unlock_irq(&np->lock); enable_irq(irq); mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ)); } netif_device_attach(dev); out: rtnl_unlock(); return 0; } static SIMPLE_DEV_PM_OPS(natsemi_pm_ops, natsemi_suspend, natsemi_resume); static struct pci_driver natsemi_driver = { .name = DRV_NAME, .id_table = natsemi_pci_tbl, .probe = natsemi_probe1, .remove = natsemi_remove1, .driver.pm = &natsemi_pm_ops, }; static int __init natsemi_init_mod (void) { /* when a module, this is printed whether or not devices are found in probe */ #ifdef MODULE printk(version); #endif return pci_register_driver(&natsemi_driver); } static void __exit natsemi_exit_mod (void) { pci_unregister_driver (&natsemi_driver); } module_init(natsemi_init_mod); module_exit(natsemi_exit_mod);
linux-master
drivers/net/ethernet/natsemi/natsemi.c
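The __set_rx_mode() routine in natsemi.c above hashes each multicast address with ether_crc() and uses the top nine bits of the CRC to select one bit in a 64-byte (512-bit) filter table. The stand-alone sketch below is not driver code: it re-implements the classic MSB-first Ethernet CRC that the kernel's ether_crc() is assumed to compute, then derives the same byte/bit position the driver sets; the sample multicast address is hypothetical.

/*
 * Stand-alone sketch (not part of natsemi.c): how __set_rx_mode() above
 * turns a multicast MAC address into one bit of the 64-byte hash table.
 * ether_crc_msb() is a local re-implementation of the classic MSB-first
 * Ethernet CRC that the kernel's ether_crc() is assumed to compute.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ether_crc_msb(int length, const uint8_t *data)
{
	uint32_t crc = 0xffffffffu;	/* seed with all ones */

	while (--length >= 0) {
		uint8_t octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      ((((crc >> 31) ^ octet) & 1) ? 0x04c11db7u : 0);
	}
	return crc;
}

int main(void)
{
	/* hypothetical multicast address, purely for illustration */
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint8_t mc_filter[64] = { 0 };
	int b = (ether_crc_msb(6, mac) >> 23) & 0x1ff;	/* top 9 CRC bits */

	mc_filter[b / 8] |= 1 << (b & 0x07);	/* same selection as the driver */
	printf("hash index %d -> byte %d bit %d, filter byte now 0x%02x\n",
	       b, b / 8, b & 0x07, mc_filter[b / 8]);
	return 0;
}

The driver then programs the table through RxFilterAddr/RxFilterData as 32 little-endian 16-bit words, which is why __set_rx_mode() combines mc_filter[i] and mc_filter[i + 1] before each write.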
// SPDX-License-Identifier: GPL-2.0 /* * jazzsonic.c * * (C) 2005 Finn Thain * * Converted to DMA API, and (from the mac68k project) introduced * dhd's support for 16-bit cards. * * (C) 1996,1998 by Thomas Bogendoerfer ([email protected]) * * This driver is based on work from Andreas Busse, but most of * the code is rewritten. * * (C) 1995 by Andreas Busse ([email protected]) * * A driver for the onboard Sonic ethernet controller on Mips Jazz * systems (Acer Pica-61, Mips Magnum 4000, Olivetti M700 and * perhaps others, too) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/gfp.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/pgtable.h> #include <asm/bootinfo.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/jazz.h> #include <asm/jazzdma.h> static char jazz_sonic_string[] = "jazzsonic"; #define SONIC_MEM_SIZE 0x100 #include "sonic.h" /* * Macros to access SONIC registers */ #define SONIC_READ(reg) (*((volatile unsigned int *)dev->base_addr+reg)) #define SONIC_WRITE(reg,val) \ do { \ *((volatile unsigned int *)dev->base_addr+(reg)) = (val); \ } while (0) /* * We cannot use station (ethernet) address prefixes to detect the * sonic controller since these are board manufacturer depended. * So we check for known Silicon Revision IDs instead. */ static unsigned short known_revisions[] = { 0x04, /* Mips Magnum 4000 */ 0xffff /* end of list */ }; static int jazzsonic_open(struct net_device* dev) { int retval; retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev); if (retval) { printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); return retval; } retval = sonic_open(dev); if (retval) free_irq(dev->irq, dev); return retval; } static int jazzsonic_close(struct net_device* dev) { int err; err = sonic_close(dev); free_irq(dev->irq, dev); return err; } static const struct net_device_ops sonic_netdev_ops = { .ndo_open = jazzsonic_open, .ndo_stop = jazzsonic_close, .ndo_start_xmit = sonic_send_packet, .ndo_get_stats = sonic_get_stats, .ndo_set_rx_mode = sonic_multicast_list, .ndo_tx_timeout = sonic_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, }; static int sonic_probe1(struct net_device *dev) { unsigned int silicon_revision; unsigned int val; struct sonic_local *lp = netdev_priv(dev); int err = -ENODEV; int i; unsigned char addr[ETH_ALEN]; if (!request_mem_region(dev->base_addr, SONIC_MEM_SIZE, jazz_sonic_string)) return -EBUSY; /* * get the Silicon Revision ID. If this is one of the known * one assume that we found a SONIC ethernet controller at * the expected location. */ silicon_revision = SONIC_READ(SONIC_SR); i = 0; while (known_revisions[i] != 0xffff && known_revisions[i] != silicon_revision) i++; if (known_revisions[i] == 0xffff) { pr_info("SONIC ethernet controller not found (0x%4x)\n", silicon_revision); goto out; } /* * Put the sonic into software reset, then * retrieve and print the ethernet address. 
*/ SONIC_WRITE(SONIC_CMD,SONIC_CR_RST); SONIC_WRITE(SONIC_CEP,0); for (i=0; i<3; i++) { val = SONIC_READ(SONIC_CAP0-i); addr[i*2] = val; addr[i*2+1] = val >> 8; } eth_hw_addr_set(dev, addr); lp->dma_bitmode = SONIC_BITMODE32; err = sonic_alloc_descriptors(dev); if (err) goto out; dev->netdev_ops = &sonic_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; /* * clear tally counter */ SONIC_WRITE(SONIC_CRCT,0xffff); SONIC_WRITE(SONIC_FAET,0xffff); SONIC_WRITE(SONIC_MPT,0xffff); return 0; out: release_mem_region(dev->base_addr, SONIC_MEM_SIZE); return err; } /* * Probe for a SONIC ethernet controller on a Mips Jazz board. * Actually probing is superfluous but we're paranoid. */ static int jazz_sonic_probe(struct platform_device *pdev) { struct net_device *dev; struct sonic_local *lp; struct resource *res; int err = 0; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; dev = alloc_etherdev(sizeof(struct sonic_local)); if (!dev) return -ENOMEM; lp = netdev_priv(dev); lp->device = &pdev->dev; SET_NETDEV_DEV(dev, &pdev->dev); platform_set_drvdata(pdev, dev); dev->base_addr = res->start; dev->irq = platform_get_irq(pdev, 0); err = sonic_probe1(dev); if (err) goto out; pr_info("SONIC ethernet @%08lx, MAC %pM, IRQ %d\n", dev->base_addr, dev->dev_addr, dev->irq); sonic_msg_init(dev); err = register_netdev(dev); if (err) goto undo_probe1; return 0; undo_probe1: dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), lp->descriptors, lp->descriptors_laddr); release_mem_region(dev->base_addr, SONIC_MEM_SIZE); out: free_netdev(dev); return err; } MODULE_DESCRIPTION("Jazz SONIC ethernet driver"); MODULE_ALIAS("platform:jazzsonic"); #include "sonic.c" static int jazz_sonic_device_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sonic_local* lp = netdev_priv(dev); unregister_netdev(dev); dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), lp->descriptors, lp->descriptors_laddr); release_mem_region(dev->base_addr, SONIC_MEM_SIZE); free_netdev(dev); return 0; } static struct platform_driver jazz_sonic_driver = { .probe = jazz_sonic_probe, .remove = jazz_sonic_device_remove, .driver = { .name = jazz_sonic_string, }, }; module_platform_driver(jazz_sonic_driver);
linux-master
drivers/net/ethernet/natsemi/jazzsonic.c
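In sonic_probe1() above, the station address is recovered by reading three 16-bit words from the SONIC CAM access port and storing the low byte of each word before the high byte. A small user-space sketch of just that unpacking step, with hypothetical register values, makes the byte ordering explicit:

/*
 * Sketch only: the byte order used by sonic_probe1() above when it
 * rebuilds the station address from three 16-bit CAM access port words
 * (low byte of each word first). The register values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t cap[3] = { 0x4000, 0x0d0b, 0xadde };	/* made-up reads */
	uint8_t addr[6];
	int i;

	for (i = 0; i < 3; i++) {
		addr[i * 2]     = cap[i] & 0xff;	/* low byte first ... */
		addr[i * 2 + 1] = cap[i] >> 8;		/* ... then high byte */
	}
	/* prints 00:40:0b:0d:de:ad for the sample words above */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}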
// SPDX-License-Identifier: GPL-2.0 /* * xtsonic.c * * (C) 2001 - 2007 Tensilica Inc. * Kevin Chea <[email protected]> * Marc Gauthier <[email protected]> * Chris Zankel <[email protected]> * * (C) 1996,1998 by Thomas Bogendoerfer ([email protected]) * * This driver is based on work from Andreas Busse, but most of * the code is rewritten. * * (C) 1995 by Andreas Busse ([email protected]) * * A driver for the onboard Sonic ethernet controller on the XT2000. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/gfp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/pgtable.h> #include <asm/io.h> #include <asm/dma.h> static char xtsonic_string[] = "xtsonic"; extern unsigned xtboard_nvram_valid(void); extern void xtboard_get_ether_addr(unsigned char *buf); #include "sonic.h" /* * According to the documentation for the Sonic ethernet controller, * EOBC should be 760 words (1520 bytes) for 32-bit applications, and, * as such, 2 words less than the buffer size. The value for RBSIZE * defined in sonic.h, however is only 1520. * * (Note that in 16-bit configurations, EOBC is 759 words (1518 bytes) and * RBSIZE 1520 bytes) */ #undef SONIC_RBSIZE #define SONIC_RBSIZE 1524 /* * The chip provides 256 byte register space. */ #define SONIC_MEM_SIZE 0x100 /* * Macros to access SONIC registers */ #define SONIC_READ(reg) \ (0xffff & *((volatile unsigned int *)dev->base_addr+reg)) #define SONIC_WRITE(reg,val) \ *((volatile unsigned int *)dev->base_addr+reg) = val /* * We cannot use station (ethernet) address prefixes to detect the * sonic controller since these are board manufacturer depended. * So we check for known Silicon Revision IDs instead. */ static unsigned short known_revisions[] = { 0x101, /* SONIC 83934 */ 0xffff /* end of list */ }; static int xtsonic_open(struct net_device *dev) { int retval; retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev); if (retval) { printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); return -EAGAIN; } retval = sonic_open(dev); if (retval) free_irq(dev->irq, dev); return retval; } static int xtsonic_close(struct net_device *dev) { int err; err = sonic_close(dev); free_irq(dev->irq, dev); return err; } static const struct net_device_ops xtsonic_netdev_ops = { .ndo_open = xtsonic_open, .ndo_stop = xtsonic_close, .ndo_start_xmit = sonic_send_packet, .ndo_get_stats = sonic_get_stats, .ndo_set_rx_mode = sonic_multicast_list, .ndo_tx_timeout = sonic_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, }; static int sonic_probe1(struct net_device *dev) { unsigned int silicon_revision; struct sonic_local *lp = netdev_priv(dev); unsigned int base_addr = dev->base_addr; int i; int err = 0; unsigned char addr[ETH_ALEN]; if (!request_mem_region(base_addr, 0x100, xtsonic_string)) return -EBUSY; /* * get the Silicon Revision ID. If this is one of the known * one assume that we found a SONIC ethernet controller at * the expected location. 
*/ silicon_revision = SONIC_READ(SONIC_SR); i = 0; while ((known_revisions[i] != 0xffff) && (known_revisions[i] != silicon_revision)) i++; if (known_revisions[i] == 0xffff) { pr_info("SONIC ethernet controller not found (0x%4x)\n", silicon_revision); return -ENODEV; } /* * Put the sonic into software reset, then retrieve ethernet address. * Note: we are assuming that the boot-loader has initialized the cam. */ SONIC_WRITE(SONIC_CMD,SONIC_CR_RST); SONIC_WRITE(SONIC_DCR, SONIC_DCR_WC0|SONIC_DCR_DW|SONIC_DCR_LBR|SONIC_DCR_SBUS); SONIC_WRITE(SONIC_CEP,0); SONIC_WRITE(SONIC_IMR,0); SONIC_WRITE(SONIC_CMD,SONIC_CR_RST); SONIC_WRITE(SONIC_CEP,0); for (i=0; i<3; i++) { unsigned int val = SONIC_READ(SONIC_CAP0-i); addr[i*2] = val; addr[i*2+1] = val >> 8; } eth_hw_addr_set(dev, addr); lp->dma_bitmode = SONIC_BITMODE32; err = sonic_alloc_descriptors(dev); if (err) goto out; dev->netdev_ops = &xtsonic_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; /* * clear tally counter */ SONIC_WRITE(SONIC_CRCT,0xffff); SONIC_WRITE(SONIC_FAET,0xffff); SONIC_WRITE(SONIC_MPT,0xffff); return 0; out: release_region(dev->base_addr, SONIC_MEM_SIZE); return err; } /* * Probe for a SONIC ethernet controller on an XT2000 board. * Actually probing is superfluous but we're paranoid. */ int xtsonic_probe(struct platform_device *pdev) { struct net_device *dev; struct sonic_local *lp; struct resource *resmem, *resirq; int err = 0; if ((resmem = platform_get_resource(pdev, IORESOURCE_MEM, 0)) == NULL) return -ENODEV; if ((resirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0)) == NULL) return -ENODEV; if ((dev = alloc_etherdev(sizeof(struct sonic_local))) == NULL) return -ENOMEM; lp = netdev_priv(dev); lp->device = &pdev->dev; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); dev->base_addr = resmem->start; dev->irq = resirq->start; if ((err = sonic_probe1(dev))) goto out; pr_info("SONIC ethernet @%08lx, MAC %pM, IRQ %d\n", dev->base_addr, dev->dev_addr, dev->irq); sonic_msg_init(dev); if ((err = register_netdev(dev))) goto undo_probe1; return 0; undo_probe1: dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), lp->descriptors, lp->descriptors_laddr); release_region(dev->base_addr, SONIC_MEM_SIZE); out: free_netdev(dev); return err; } MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver"); #include "sonic.c" static int xtsonic_device_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sonic_local *lp = netdev_priv(dev); unregister_netdev(dev); dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), lp->descriptors, lp->descriptors_laddr); release_region (dev->base_addr, SONIC_MEM_SIZE); free_netdev(dev); return 0; } static struct platform_driver xtsonic_driver = { .probe = xtsonic_probe, .remove = xtsonic_device_remove, .driver = { .name = xtsonic_string, }, }; module_platform_driver(xtsonic_driver);
linux-master
drivers/net/ethernet/natsemi/xtsonic.c
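The xtsonic.c comment above explains why it overrides SONIC_RBSIZE to 1524: the End Of Buffer Count must sit two 16-bit words below the buffer size in 32-bit mode (one word below in 16-bit mode), which is exactly the expression sonic_init() later writes to SONIC_EOBC. A minimal sketch of that arithmetic, assuming the buffer sizes quoted in the comment:

/*
 * Sketch of the EOBC arithmetic discussed in the xtsonic.c comment above,
 * using the same expression sonic_init() writes to SONIC_EOBC:
 * (buffer size in 16-bit words) minus 2 in 32-bit mode, minus 1 in 16-bit.
 */
#include <stdio.h>

static int sonic_eobc_words(int rbsize_bytes, int is_32bit)
{
	return (rbsize_bytes >> 1) - (is_32bit ? 2 : 1);
}

int main(void)
{
	/* xtsonic overrides SONIC_RBSIZE to 1524, giving the documented 760 */
	printf("32-bit mode, 1524-byte buffer: EOBC = %d words\n",
	       sonic_eobc_words(1524, 1));
	/* 16-bit configurations keep 1520-byte buffers, giving 759 */
	printf("16-bit mode, 1520-byte buffer: EOBC = %d words\n",
	       sonic_eobc_words(1520, 0));
	return 0;
}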
// SPDX-License-Identifier: GPL-2.0-only /* * sonic.c * * (C) 2005 Finn Thain * * Converted to DMA API, added zero-copy buffer handling, and * (from the mac68k project) introduced dhd's support for 16-bit cards. * * (C) 1996,1998 by Thomas Bogendoerfer ([email protected]) * * This driver is based on work from Andreas Busse, but most of * the code is rewritten. * * (C) 1995 by Andreas Busse ([email protected]) * * Core code included by system sonic drivers * * And... partially rewritten again by David Huggins-Daines in order * to cope with screwed up Macintosh NICs that may or may not use * 16-bit DMA. * * (C) 1999 David Huggins-Daines <[email protected]> * */ /* * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook, * National Semiconductors data sheet for the DP83932B Sonic Ethernet * controller, and the files "8390.c" and "skeleton.c" in this directory. * * Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi * Application Note AN-746, the files "lance.c" and "ibmlana.c". See also * the NetBSD file "sys/arch/mac68k/dev/if_sn.c". */ static unsigned int version_printed; static int sonic_debug = -1; module_param(sonic_debug, int, 0); MODULE_PARM_DESC(sonic_debug, "debug message level"); static void sonic_msg_init(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); lp->msg_enable = netif_msg_init(sonic_debug, 0); if (version_printed++ == 0) netif_dbg(lp, drv, dev, "%s", version); } static int sonic_alloc_descriptors(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); /* Allocate a chunk of memory for the descriptors. Note that this * must not cross a 64K boundary. It is smaller than one page which * means that page alignment is a sufficient condition. */ lp->descriptors = dma_alloc_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), &lp->descriptors_laddr, GFP_KERNEL); if (!lp->descriptors) return -ENOMEM; lp->cda = lp->descriptors; lp->tda = lp->cda + SIZEOF_SONIC_CDA * SONIC_BUS_SCALE(lp->dma_bitmode); lp->rda = lp->tda + SIZEOF_SONIC_TD * SONIC_NUM_TDS * SONIC_BUS_SCALE(lp->dma_bitmode); lp->rra = lp->rda + SIZEOF_SONIC_RD * SONIC_NUM_RDS * SONIC_BUS_SCALE(lp->dma_bitmode); lp->cda_laddr = lp->descriptors_laddr; lp->tda_laddr = lp->cda_laddr + SIZEOF_SONIC_CDA * SONIC_BUS_SCALE(lp->dma_bitmode); lp->rda_laddr = lp->tda_laddr + SIZEOF_SONIC_TD * SONIC_NUM_TDS * SONIC_BUS_SCALE(lp->dma_bitmode); lp->rra_laddr = lp->rda_laddr + SIZEOF_SONIC_RD * SONIC_NUM_RDS * SONIC_BUS_SCALE(lp->dma_bitmode); return 0; } /* * Open/initialize the SONIC controller. * * This routine should set everything up anew at each open, even * registers that "should" only need to be set once at boot, so that * there is non-reboot way to recover if something goes wrong. 
*/ static int sonic_open(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); int i; netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__); spin_lock_init(&lp->lock); for (i = 0; i < SONIC_NUM_RRS; i++) { struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); if (skb == NULL) { while(i > 0) { /* free any that were allocated successfully */ i--; dev_kfree_skb(lp->rx_skb[i]); lp->rx_skb[i] = NULL; } printk(KERN_ERR "%s: couldn't allocate receive buffers\n", dev->name); return -ENOMEM; } /* align IP header unless DMA requires otherwise */ if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2) skb_reserve(skb, 2); lp->rx_skb[i] = skb; } for (i = 0; i < SONIC_NUM_RRS; i++) { dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE), SONIC_RBSIZE, DMA_FROM_DEVICE); if (dma_mapping_error(lp->device, laddr)) { while(i > 0) { /* free any that were mapped successfully */ i--; dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE); lp->rx_laddr[i] = (dma_addr_t)0; } for (i = 0; i < SONIC_NUM_RRS; i++) { dev_kfree_skb(lp->rx_skb[i]); lp->rx_skb[i] = NULL; } printk(KERN_ERR "%s: couldn't map rx DMA buffers\n", dev->name); return -ENOMEM; } lp->rx_laddr[i] = laddr; } /* * Initialize the SONIC */ sonic_init(dev, true); netif_start_queue(dev); netif_dbg(lp, ifup, dev, "%s: Initialization done\n", __func__); return 0; } /* Wait for the SONIC to become idle. */ static void sonic_quiesce(struct net_device *dev, u16 mask, bool may_sleep) { struct sonic_local * __maybe_unused lp = netdev_priv(dev); int i; u16 bits; for (i = 0; i < 1000; ++i) { bits = SONIC_READ(SONIC_CMD) & mask; if (!bits) return; if (!may_sleep) udelay(20); else usleep_range(100, 200); } WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits); } /* * Close the SONIC device */ static int sonic_close(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); int i; netif_dbg(lp, ifdown, dev, "%s\n", __func__); netif_stop_queue(dev); /* * stop the SONIC, disable interrupts */ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); sonic_quiesce(dev, SONIC_CR_ALL, true); SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); /* unmap and free skbs that haven't been transmitted */ for (i = 0; i < SONIC_NUM_TDS; i++) { if(lp->tx_laddr[i]) { dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE); lp->tx_laddr[i] = (dma_addr_t)0; } if(lp->tx_skb[i]) { dev_kfree_skb(lp->tx_skb[i]); lp->tx_skb[i] = NULL; } } /* unmap and free the receive buffers */ for (i = 0; i < SONIC_NUM_RRS; i++) { if(lp->rx_laddr[i]) { dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE); lp->rx_laddr[i] = (dma_addr_t)0; } if(lp->rx_skb[i]) { dev_kfree_skb(lp->rx_skb[i]); lp->rx_skb[i] = NULL; } } return 0; } static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct sonic_local *lp = netdev_priv(dev); int i; /* * put the Sonic into software-reset mode and * disable all interrupts before releasing DMA buffers */ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); sonic_quiesce(dev, SONIC_CR_ALL, false); SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); /* We could resend the original skbs. Easier to re-initialise. 
*/ for (i = 0; i < SONIC_NUM_TDS; i++) { if(lp->tx_laddr[i]) { dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE); lp->tx_laddr[i] = (dma_addr_t)0; } if(lp->tx_skb[i]) { dev_kfree_skb(lp->tx_skb[i]); lp->tx_skb[i] = NULL; } } /* Try to restart the adaptor. */ sonic_init(dev, false); lp->stats.tx_errors++; netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } /* * transmit packet * * Appends new TD during transmission thus avoiding any TX interrupts * until we run out of TDs. * This routine interacts closely with the ISR in that it may, * set tx_skb[i] * reset the status flags of the new TD * set and reset EOL flags * stop the tx queue * The ISR interacts with this routine in various ways. It may, * reset tx_skb[i] * test the EOL and status flags of the TDs * wake the tx queue * Concurrently with all of this, the SONIC is potentially writing to * the status flags of the TDs. */ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); dma_addr_t laddr; int length; int entry; unsigned long flags; netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb); length = skb->len; if (length < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; length = ETH_ZLEN; } /* * Map the packet data into the logical DMA address space */ laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE); if (dma_mapping_error(lp->device, laddr)) { pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } spin_lock_irqsave(&lp->lock, flags); entry = (lp->eol_tx + 1) & SONIC_TDS_MASK; sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */ sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */ sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */ sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff); sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16); sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length); sonic_tda_put(dev, entry, SONIC_TD_LINK, sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL); sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK, ~SONIC_EOL & sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK)); netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__); SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); lp->tx_len[entry] = length; lp->tx_laddr[entry] = laddr; lp->tx_skb[entry] = skb; lp->eol_tx = entry; entry = (entry + 1) & SONIC_TDS_MASK; if (lp->tx_skb[entry]) { /* The ring is full, the ISR has yet to process the next TD. */ netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__); netif_stop_queue(dev); /* after this packet, wait for ISR to free up some TDAs */ } spin_unlock_irqrestore(&lp->lock, flags); return NETDEV_TX_OK; } /* * The typical workload of the driver: * Handle the network interface interrupts. */ static irqreturn_t sonic_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct sonic_local *lp = netdev_priv(dev); int status; unsigned long flags; /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt() * with sonic_send_packet() so that the two functions can share state. * Secondly, it makes sonic_interrupt() re-entrant, as that is required * by macsonic which must use two IRQs with different priority levels. 
*/ spin_lock_irqsave(&lp->lock, flags); status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT; if (!status) { spin_unlock_irqrestore(&lp->lock, flags); return IRQ_NONE; } do { SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */ if (status & SONIC_INT_PKTRX) { netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__); sonic_rx(dev); /* got packet(s) */ } if (status & SONIC_INT_TXDN) { int entry = lp->cur_tx; int td_status; int freed_some = 0; /* The state of a Transmit Descriptor may be inferred * from { tx_skb[entry], td_status } as follows. * { clear, clear } => the TD has never been used * { set, clear } => the TD was handed to SONIC * { set, set } => the TD was handed back * { clear, set } => the TD is available for re-use */ netif_dbg(lp, intr, dev, "%s: tx done\n", __func__); while (lp->tx_skb[entry] != NULL) { if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0) break; if (td_status & SONIC_TCR_PTX) { lp->stats.tx_packets++; lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE); } else { if (td_status & (SONIC_TCR_EXD | SONIC_TCR_EXC | SONIC_TCR_BCM)) lp->stats.tx_aborted_errors++; if (td_status & (SONIC_TCR_NCRS | SONIC_TCR_CRLS)) lp->stats.tx_carrier_errors++; if (td_status & SONIC_TCR_OWC) lp->stats.tx_window_errors++; if (td_status & SONIC_TCR_FU) lp->stats.tx_fifo_errors++; } /* We must free the original skb */ dev_consume_skb_irq(lp->tx_skb[entry]); lp->tx_skb[entry] = NULL; /* and unmap DMA buffer */ dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE); lp->tx_laddr[entry] = (dma_addr_t)0; freed_some = 1; if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) { entry = (entry + 1) & SONIC_TDS_MASK; break; } entry = (entry + 1) & SONIC_TDS_MASK; } if (freed_some || lp->tx_skb[entry] == NULL) netif_wake_queue(dev); /* The ring is no longer full */ lp->cur_tx = entry; } /* * check error conditions */ if (status & SONIC_INT_RFO) { netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n", __func__); } if (status & SONIC_INT_RDE) { netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n", __func__); } if (status & SONIC_INT_RBAE) { netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n", __func__); } /* counter overruns; all counters are 16bit wide */ if (status & SONIC_INT_FAE) lp->stats.rx_frame_errors += 65536; if (status & SONIC_INT_CRC) lp->stats.rx_crc_errors += 65536; if (status & SONIC_INT_MP) lp->stats.rx_missed_errors += 65536; /* transmit error */ if (status & SONIC_INT_TXER) { u16 tcr = SONIC_READ(SONIC_TCR); netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n", __func__, tcr); if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC | SONIC_TCR_FU | SONIC_TCR_BCM)) { /* Aborted transmission. Try again. */ netif_stop_queue(dev); SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); } } /* bus retry */ if (status & SONIC_INT_BR) { printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n", dev->name); /* ... to help debug DMA problems causing endless interrupts. */ /* Bounce the eth interface to turn on the interrupt again. */ SONIC_WRITE(SONIC_IMR, 0); } status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT; } while (status); spin_unlock_irqrestore(&lp->lock, flags); return IRQ_HANDLED; } /* Return the array index corresponding to a given Receive Buffer pointer. 
*/ static int index_from_addr(struct sonic_local *lp, dma_addr_t addr, unsigned int last) { unsigned int i = last; do { i = (i + 1) & SONIC_RRS_MASK; if (addr == lp->rx_laddr[i]) return i; } while (i != last); return -ENOENT; } /* Allocate and map a new skb to be used as a receive buffer. */ static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp, struct sk_buff **new_skb, dma_addr_t *new_addr) { *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); if (!*new_skb) return false; if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2) skb_reserve(*new_skb, 2); *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE), SONIC_RBSIZE, DMA_FROM_DEVICE); if (dma_mapping_error(lp->device, *new_addr)) { dev_kfree_skb(*new_skb); *new_skb = NULL; return false; } return true; } /* Place a new receive resource in the Receive Resource Area and update RWP. */ static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp, dma_addr_t old_addr, dma_addr_t new_addr) { unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP)); unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP)); u32 buf; /* The resources in the range [RRP, RWP) belong to the SONIC. This loop * scans the other resources in the RRA, those in the range [RWP, RRP). */ do { buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) | sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L); if (buf == old_addr) break; entry = (entry + 1) & SONIC_RRS_MASK; } while (entry != end); WARN_ONCE(buf != old_addr, "failed to find resource!\n"); sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16); sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff); entry = (entry + 1) & SONIC_RRS_MASK; SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry)); } /* * We have a good packet(s), pass it/them up the network stack. */ static void sonic_rx(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); int entry = lp->cur_rx; int prev_entry = lp->eol_rx; bool rbe = false; while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) { u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); /* If the RD has LPKT set, the chip has finished with the RB */ if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) { struct sk_buff *new_skb; dma_addr_t new_laddr; u32 addr = (sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_H) << 16) | sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L); int i = index_from_addr(lp, addr, entry); if (i < 0) { WARN_ONCE(1, "failed to find buffer!\n"); break; } if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) { struct sk_buff *used_skb = lp->rx_skb[i]; int pkt_len; /* Pass the used buffer up the stack */ dma_unmap_single(lp->device, addr, SONIC_RBSIZE, DMA_FROM_DEVICE); pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN); skb_trim(used_skb, pkt_len); used_skb->protocol = eth_type_trans(used_skb, dev); netif_rx(used_skb); lp->stats.rx_packets++; lp->stats.rx_bytes += pkt_len; lp->rx_skb[i] = new_skb; lp->rx_laddr[i] = new_laddr; } else { /* Failed to obtain a new buffer so re-use it */ new_laddr = addr; lp->stats.rx_dropped++; } /* If RBE is already asserted when RWP advances then * it's safe to clear RBE after processing this packet. 
*/ rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE; sonic_update_rra(dev, lp, addr, new_laddr); } /* * give back the descriptor */ sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0); sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1); prev_entry = entry; entry = (entry + 1) & SONIC_RDS_MASK; } lp->cur_rx = entry; if (prev_entry != lp->eol_rx) { /* Advance the EOL flag to put descriptors back into service */ sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL | sonic_rda_get(dev, prev_entry, SONIC_RD_LINK)); sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL & sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK)); lp->eol_rx = prev_entry; } if (rbe) SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); } /* * Get the current statistics. * This may be called with the device open or closed. */ static struct net_device_stats *sonic_get_stats(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); /* read the tally counter from the SONIC and reset them */ lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT); SONIC_WRITE(SONIC_CRCT, 0xffff); lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET); SONIC_WRITE(SONIC_FAET, 0xffff); lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT); SONIC_WRITE(SONIC_MPT, 0xffff); return &lp->stats; } /* * Set or clear the multicast filter for this adaptor. */ static void sonic_multicast_list(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); unsigned int rcr; struct netdev_hw_addr *ha; unsigned char *addr; int i; rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC); rcr |= SONIC_RCR_BRD; /* accept broadcast packets */ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */ rcr |= SONIC_RCR_PRO; } else { if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 15)) { rcr |= SONIC_RCR_AMC; } else { unsigned long flags; netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__, netdev_mc_count(dev)); sonic_set_cam_enable(dev, 1); /* always enable our own address */ i = 1; netdev_for_each_mc_addr(ha, dev) { addr = ha->addr; sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]); sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]); sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]); sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i)); i++; } SONIC_WRITE(SONIC_CDC, 16); SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff); /* LCAM and TXP commands can't be used simultaneously */ spin_lock_irqsave(&lp->lock, flags); sonic_quiesce(dev, SONIC_CR_TXP, false); SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); sonic_quiesce(dev, SONIC_CR_LCAM, false); spin_unlock_irqrestore(&lp->lock, flags); } } netif_dbg(lp, ifup, dev, "%s: setting RCR=%x\n", __func__, rcr); SONIC_WRITE(SONIC_RCR, rcr); } /* * Initialize the SONIC ethernet controller. 
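 * The may_sleep argument is passed through to sonic_quiesce() to indicate whether sleeping while chip commands complete is acceptable.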
*/ static int sonic_init(struct net_device *dev, bool may_sleep) { struct sonic_local *lp = netdev_priv(dev); int i; /* * put the Sonic into software-reset mode and * disable all interrupts */ SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); /* While in reset mode, clear CAM Enable register */ SONIC_WRITE(SONIC_CE, 0); /* * clear software reset flag, disable receiver, clear and * enable interrupts, then completely initialize the SONIC */ SONIC_WRITE(SONIC_CMD, 0); SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP); sonic_quiesce(dev, SONIC_CR_ALL, may_sleep); /* * initialize the receive resource area */ netif_dbg(lp, ifup, dev, "%s: initialize receive resource area\n", __func__); for (i = 0; i < SONIC_NUM_RRS; i++) { u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff; u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16; sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l); sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h); sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1); sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0); } /* initialize all RRA registers */ SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0)); SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS)); SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0)); SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1)); SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16); SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1)); /* load the resource pointers */ netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__); SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA); sonic_quiesce(dev, SONIC_CR_RRRA, may_sleep); /* * Initialize the receive descriptors so that they * become a circular linked list, ie. let the last * descriptor point to the first again. 
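 * The last descriptor also carries SONIC_EOL, so the chip stops there until sonic_rx() advances the EOL marker.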
*/ netif_dbg(lp, ifup, dev, "%s: initialize receive descriptors\n", __func__); for (i=0; i<SONIC_NUM_RDS; i++) { sonic_rda_put(dev, i, SONIC_RD_STATUS, 0); sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0); sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0); sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0); sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0); sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1); sonic_rda_put(dev, i, SONIC_RD_LINK, lp->rda_laddr + ((i+1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode))); } /* fix last descriptor */ sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK, (lp->rda_laddr & 0xffff) | SONIC_EOL); lp->eol_rx = SONIC_NUM_RDS - 1; lp->cur_rx = 0; SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16); SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff); /* * initialize transmit descriptors */ netif_dbg(lp, ifup, dev, "%s: initialize transmit descriptors\n", __func__); for (i = 0; i < SONIC_NUM_TDS; i++) { sonic_tda_put(dev, i, SONIC_TD_STATUS, 0); sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0); sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0); sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0); sonic_tda_put(dev, i, SONIC_TD_LINK, (lp->tda_laddr & 0xffff) + (i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode)); lp->tx_skb[i] = NULL; } /* fix last descriptor */ sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK, (lp->tda_laddr & 0xffff)); SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16); SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff); lp->cur_tx = 0; lp->eol_tx = SONIC_NUM_TDS - 1; /* * put our own address to CAM desc[0] */ sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]); sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]); sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]); sonic_set_cam_enable(dev, 1); for (i = 0; i < 16; i++) sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i); /* * initialize CAM registers */ SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff); SONIC_WRITE(SONIC_CDC, 16); /* * load the CAM */ SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); sonic_quiesce(dev, SONIC_CR_LCAM, may_sleep); /* * enable receiver, disable loopback * and enable all interrupts */ SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT); SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT); SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN); netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__, SONIC_READ(SONIC_CMD)); return 0; } MODULE_LICENSE("GPL");
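/*
 * A minimal stand-alone sketch (illustrative names, not part of the driver)
 * of the transmit-descriptor state table that the completion loop in
 * sonic_interrupt() relies on: ownership is inferred from the
 * { tx_skb[entry], td_status } pair.
 */
enum td_state {
	TD_NEVER_USED,		/* { clear, clear } */
	TD_OWNED_BY_SONIC,	/* { set,   clear } */
	TD_HANDED_BACK,		/* { set,   set   } */
	TD_REUSABLE,		/* { clear, set   } */
};

/* skb is tx_skb[entry]; status is the SONIC_TD_STATUS word, which stays 0
 * until the chip writes completion status back.
 */
static enum td_state td_classify(const void *skb, int status)
{
	if (skb)
		return status ? TD_HANDED_BACK : TD_OWNED_BY_SONIC;
	return status ? TD_REUSABLE : TD_NEVER_USED;
}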
linux-master
drivers/net/ethernet/natsemi/sonic.c
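/*
 * A minimal sketch (illustrative names and sizes, not driver code) of the
 * circular search used by index_from_addr() above: start just after the
 * previous hit and wrap with a power-of-two mask, so the common case is
 * found in a single step.
 */
#include <stdint.h>

#define NSLOTS 16			/* assumed power of two */
#define SLOT_MASK (NSLOTS - 1)

static int find_slot(const uint64_t table[NSLOTS], uint64_t addr,
		     unsigned int last)
{
	unsigned int i = last;

	do {
		i = (i + 1) & SLOT_MASK;
		if (table[i] == addr)
			return (int)i;
	} while (i != last);

	return -1;			/* not found */
}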
// SPDX-License-Identifier: GPL-2.0-or-later /* * drivers/net/ethernet/nxp/lpc_eth.c * * Author: Kevin Wells <[email protected]> * * Copyright (C) 2010 NXP Semiconductors * Copyright (C) 2012 Roland Stigge <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/clk.h> #include <linux/crc32.h> #include <linux/etherdevice.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/soc/nxp/lpc32xx-misc.h> #define MODNAME "lpc-eth" #define DRV_VERSION "1.00" #define ENET_MAXF_SIZE 1536 #define ENET_RX_DESC 48 #define ENET_TX_DESC 16 #define NAPI_WEIGHT 16 /* * Ethernet MAC controller Register offsets */ #define LPC_ENET_MAC1(x) (x + 0x000) #define LPC_ENET_MAC2(x) (x + 0x004) #define LPC_ENET_IPGT(x) (x + 0x008) #define LPC_ENET_IPGR(x) (x + 0x00C) #define LPC_ENET_CLRT(x) (x + 0x010) #define LPC_ENET_MAXF(x) (x + 0x014) #define LPC_ENET_SUPP(x) (x + 0x018) #define LPC_ENET_TEST(x) (x + 0x01C) #define LPC_ENET_MCFG(x) (x + 0x020) #define LPC_ENET_MCMD(x) (x + 0x024) #define LPC_ENET_MADR(x) (x + 0x028) #define LPC_ENET_MWTD(x) (x + 0x02C) #define LPC_ENET_MRDD(x) (x + 0x030) #define LPC_ENET_MIND(x) (x + 0x034) #define LPC_ENET_SA0(x) (x + 0x040) #define LPC_ENET_SA1(x) (x + 0x044) #define LPC_ENET_SA2(x) (x + 0x048) #define LPC_ENET_COMMAND(x) (x + 0x100) #define LPC_ENET_STATUS(x) (x + 0x104) #define LPC_ENET_RXDESCRIPTOR(x) (x + 0x108) #define LPC_ENET_RXSTATUS(x) (x + 0x10C) #define LPC_ENET_RXDESCRIPTORNUMBER(x) (x + 0x110) #define LPC_ENET_RXPRODUCEINDEX(x) (x + 0x114) #define LPC_ENET_RXCONSUMEINDEX(x) (x + 0x118) #define LPC_ENET_TXDESCRIPTOR(x) (x + 0x11C) #define LPC_ENET_TXSTATUS(x) (x + 0x120) #define LPC_ENET_TXDESCRIPTORNUMBER(x) (x + 0x124) #define LPC_ENET_TXPRODUCEINDEX(x) (x + 0x128) #define LPC_ENET_TXCONSUMEINDEX(x) (x + 0x12C) #define LPC_ENET_TSV0(x) (x + 0x158) #define LPC_ENET_TSV1(x) (x + 0x15C) #define LPC_ENET_RSV(x) (x + 0x160) #define LPC_ENET_FLOWCONTROLCOUNTER(x) (x + 0x170) #define LPC_ENET_FLOWCONTROLSTATUS(x) (x + 0x174) #define LPC_ENET_RXFILTER_CTRL(x) (x + 0x200) #define LPC_ENET_RXFILTERWOLSTATUS(x) (x + 0x204) #define LPC_ENET_RXFILTERWOLCLEAR(x) (x + 0x208) #define LPC_ENET_HASHFILTERL(x) (x + 0x210) #define LPC_ENET_HASHFILTERH(x) (x + 0x214) #define LPC_ENET_INTSTATUS(x) (x + 0xFE0) #define LPC_ENET_INTENABLE(x) (x + 0xFE4) #define LPC_ENET_INTCLEAR(x) (x + 0xFE8) #define LPC_ENET_INTSET(x) (x + 0xFEC) #define LPC_ENET_POWERDOWN(x) (x + 0xFF4) /* * mac1 register definitions */ #define LPC_MAC1_RECV_ENABLE (1 << 0) #define LPC_MAC1_PASS_ALL_RX_FRAMES (1 << 1) #define LPC_MAC1_RX_FLOW_CONTROL (1 << 2) #define LPC_MAC1_TX_FLOW_CONTROL (1 << 3) #define LPC_MAC1_LOOPBACK (1 << 4) #define LPC_MAC1_RESET_TX (1 << 8) #define LPC_MAC1_RESET_MCS_TX (1 << 9) #define LPC_MAC1_RESET_RX (1 << 10) #define LPC_MAC1_RESET_MCS_RX (1 << 11) #define LPC_MAC1_SIMULATION_RESET (1 << 14) #define LPC_MAC1_SOFT_RESET (1 << 15) /* * mac2 register definitions */ #define LPC_MAC2_FULL_DUPLEX (1 << 0) #define LPC_MAC2_FRAME_LENGTH_CHECKING (1 << 1) #define LPC_MAC2_HUGH_LENGTH_CHECKING (1 << 2) #define LPC_MAC2_DELAYED_CRC (1 << 3) #define LPC_MAC2_CRC_ENABLE (1 << 4) #define LPC_MAC2_PAD_CRC_ENABLE (1 << 5) #define LPC_MAC2_VLAN_PAD_ENABLE (1 << 6) #define LPC_MAC2_AUTO_DETECT_PAD_ENABLE (1 << 7) #define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT (1 << 8) #define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT (1 << 9) #define 
LPC_MAC2_NO_BACKOFF (1 << 12) #define LPC_MAC2_BACK_PRESSURE (1 << 13) #define LPC_MAC2_EXCESS_DEFER (1 << 14) /* * ipgt register definitions */ #define LPC_IPGT_LOAD(n) ((n) & 0x7F) /* * ipgr register definitions */ #define LPC_IPGR_LOAD_PART2(n) ((n) & 0x7F) #define LPC_IPGR_LOAD_PART1(n) (((n) & 0x7F) << 8) /* * clrt register definitions */ #define LPC_CLRT_LOAD_RETRY_MAX(n) ((n) & 0xF) #define LPC_CLRT_LOAD_COLLISION_WINDOW(n) (((n) & 0x3F) << 8) /* * maxf register definitions */ #define LPC_MAXF_LOAD_MAX_FRAME_LEN(n) ((n) & 0xFFFF) /* * supp register definitions */ #define LPC_SUPP_SPEED (1 << 8) #define LPC_SUPP_RESET_RMII (1 << 11) /* * test register definitions */ #define LPC_TEST_SHORTCUT_PAUSE_QUANTA (1 << 0) #define LPC_TEST_PAUSE (1 << 1) #define LPC_TEST_BACKPRESSURE (1 << 2) /* * mcfg register definitions */ #define LPC_MCFG_SCAN_INCREMENT (1 << 0) #define LPC_MCFG_SUPPRESS_PREAMBLE (1 << 1) #define LPC_MCFG_CLOCK_SELECT(n) (((n) & 0x7) << 2) #define LPC_MCFG_CLOCK_HOST_DIV_4 0 #define LPC_MCFG_CLOCK_HOST_DIV_6 2 #define LPC_MCFG_CLOCK_HOST_DIV_8 3 #define LPC_MCFG_CLOCK_HOST_DIV_10 4 #define LPC_MCFG_CLOCK_HOST_DIV_14 5 #define LPC_MCFG_CLOCK_HOST_DIV_20 6 #define LPC_MCFG_CLOCK_HOST_DIV_28 7 #define LPC_MCFG_RESET_MII_MGMT (1 << 15) /* * mcmd register definitions */ #define LPC_MCMD_READ (1 << 0) #define LPC_MCMD_SCAN (1 << 1) /* * madr register definitions */ #define LPC_MADR_REGISTER_ADDRESS(n) ((n) & 0x1F) #define LPC_MADR_PHY_0ADDRESS(n) (((n) & 0x1F) << 8) /* * mwtd register definitions */ #define LPC_MWDT_WRITE(n) ((n) & 0xFFFF) /* * mrdd register definitions */ #define LPC_MRDD_READ_MASK 0xFFFF /* * mind register definitions */ #define LPC_MIND_BUSY (1 << 0) #define LPC_MIND_SCANNING (1 << 1) #define LPC_MIND_NOT_VALID (1 << 2) #define LPC_MIND_MII_LINK_FAIL (1 << 3) /* * command register definitions */ #define LPC_COMMAND_RXENABLE (1 << 0) #define LPC_COMMAND_TXENABLE (1 << 1) #define LPC_COMMAND_REG_RESET (1 << 3) #define LPC_COMMAND_TXRESET (1 << 4) #define LPC_COMMAND_RXRESET (1 << 5) #define LPC_COMMAND_PASSRUNTFRAME (1 << 6) #define LPC_COMMAND_PASSRXFILTER (1 << 7) #define LPC_COMMAND_TXFLOWCONTROL (1 << 8) #define LPC_COMMAND_RMII (1 << 9) #define LPC_COMMAND_FULLDUPLEX (1 << 10) /* * status register definitions */ #define LPC_STATUS_RXACTIVE (1 << 0) #define LPC_STATUS_TXACTIVE (1 << 1) /* * tsv0 register definitions */ #define LPC_TSV0_CRC_ERROR (1 << 0) #define LPC_TSV0_LENGTH_CHECK_ERROR (1 << 1) #define LPC_TSV0_LENGTH_OUT_OF_RANGE (1 << 2) #define LPC_TSV0_DONE (1 << 3) #define LPC_TSV0_MULTICAST (1 << 4) #define LPC_TSV0_BROADCAST (1 << 5) #define LPC_TSV0_PACKET_DEFER (1 << 6) #define LPC_TSV0_ESCESSIVE_DEFER (1 << 7) #define LPC_TSV0_ESCESSIVE_COLLISION (1 << 8) #define LPC_TSV0_LATE_COLLISION (1 << 9) #define LPC_TSV0_GIANT (1 << 10) #define LPC_TSV0_UNDERRUN (1 << 11) #define LPC_TSV0_TOTAL_BYTES(n) (((n) >> 12) & 0xFFFF) #define LPC_TSV0_CONTROL_FRAME (1 << 28) #define LPC_TSV0_PAUSE (1 << 29) #define LPC_TSV0_BACKPRESSURE (1 << 30) #define LPC_TSV0_VLAN (1 << 31) /* * tsv1 register definitions */ #define LPC_TSV1_TRANSMIT_BYTE_COUNT(n) ((n) & 0xFFFF) #define LPC_TSV1_COLLISION_COUNT(n) (((n) >> 16) & 0xF) /* * rsv register definitions */ #define LPC_RSV_RECEIVED_BYTE_COUNT(n) ((n) & 0xFFFF) #define LPC_RSV_RXDV_EVENT_IGNORED (1 << 16) #define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN (1 << 17) #define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN (1 << 18) #define LPC_RSV_RECEIVE_CODE_VIOLATION (1 << 19) #define LPC_RSV_CRC_ERROR (1 << 20) #define 
LPC_RSV_LENGTH_CHECK_ERROR (1 << 21) #define LPC_RSV_LENGTH_OUT_OF_RANGE (1 << 22) #define LPC_RSV_RECEIVE_OK (1 << 23) #define LPC_RSV_MULTICAST (1 << 24) #define LPC_RSV_BROADCAST (1 << 25) #define LPC_RSV_DRIBBLE_NIBBLE (1 << 26) #define LPC_RSV_CONTROL_FRAME (1 << 27) #define LPC_RSV_PAUSE (1 << 28) #define LPC_RSV_UNSUPPORTED_OPCODE (1 << 29) #define LPC_RSV_VLAN (1 << 30) /* * flowcontrolcounter register definitions */ #define LPC_FCCR_MIRRORCOUNTER(n) ((n) & 0xFFFF) #define LPC_FCCR_PAUSETIMER(n) (((n) >> 16) & 0xFFFF) /* * flowcontrolstatus register definitions */ #define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF) /* * rxfilterctrl, rxfilterwolstatus, and rxfilterwolclear shared * register definitions */ #define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0) #define LPC_RXFLTRW_ACCEPTUBROADCAST (1 << 1) #define LPC_RXFLTRW_ACCEPTUMULTICAST (1 << 2) #define LPC_RXFLTRW_ACCEPTUNICASTHASH (1 << 3) #define LPC_RXFLTRW_ACCEPTUMULTICASTHASH (1 << 4) #define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5) /* * rxfilterctrl register definitions */ #define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12) #define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13) /* * rxfilterwolstatus/rxfilterwolclear register definitions */ #define LPC_RXFLTRWSTS_RXFILTERWOL (1 << 7) #define LPC_RXFLTRWSTS_MAGICPACKETWOL (1 << 8) /* * intstatus, intenable, intclear, and Intset shared register * definitions */ #define LPC_MACINT_RXOVERRUNINTEN (1 << 0) #define LPC_MACINT_RXERRORONINT (1 << 1) #define LPC_MACINT_RXFINISHEDINTEN (1 << 2) #define LPC_MACINT_RXDONEINTEN (1 << 3) #define LPC_MACINT_TXUNDERRUNINTEN (1 << 4) #define LPC_MACINT_TXERRORINTEN (1 << 5) #define LPC_MACINT_TXFINISHEDINTEN (1 << 6) #define LPC_MACINT_TXDONEINTEN (1 << 7) #define LPC_MACINT_SOFTINTEN (1 << 12) #define LPC_MACINT_WAKEUPINTEN (1 << 13) /* * powerdown register definitions */ #define LPC_POWERDOWN_MACAHB (1 << 31) static phy_interface_t lpc_phy_interface_mode(struct device *dev) { if (dev && dev->of_node) { const char *mode = of_get_property(dev->of_node, "phy-mode", NULL); if (mode && !strcmp(mode, "mii")) return PHY_INTERFACE_MODE_MII; } return PHY_INTERFACE_MODE_RMII; } static bool use_iram_for_net(struct device *dev) { if (dev && dev->of_node) return of_property_read_bool(dev->of_node, "use-iram"); return false; } /* Receive Status information word */ #define RXSTATUS_SIZE 0x000007FF #define RXSTATUS_CONTROL (1 << 18) #define RXSTATUS_VLAN (1 << 19) #define RXSTATUS_FILTER (1 << 20) #define RXSTATUS_MULTICAST (1 << 21) #define RXSTATUS_BROADCAST (1 << 22) #define RXSTATUS_CRC (1 << 23) #define RXSTATUS_SYMBOL (1 << 24) #define RXSTATUS_LENGTH (1 << 25) #define RXSTATUS_RANGE (1 << 26) #define RXSTATUS_ALIGN (1 << 27) #define RXSTATUS_OVERRUN (1 << 28) #define RXSTATUS_NODESC (1 << 29) #define RXSTATUS_LAST (1 << 30) #define RXSTATUS_ERROR (1 << 31) #define RXSTATUS_STATUS_ERROR \ (RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \ RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC) /* Receive Descriptor control word */ #define RXDESC_CONTROL_SIZE 0x000007FF #define RXDESC_CONTROL_INT (1 << 31) /* Transmit Status information word */ #define TXSTATUS_COLLISIONS_GET(x) (((x) >> 21) & 0xF) #define TXSTATUS_DEFER (1 << 25) #define TXSTATUS_EXCESSDEFER (1 << 26) #define TXSTATUS_EXCESSCOLL (1 << 27) #define TXSTATUS_LATECOLL (1 << 28) #define TXSTATUS_UNDERRUN (1 << 29) #define TXSTATUS_NODESC (1 << 30) #define TXSTATUS_ERROR (1 << 31) /* Transmit Descriptor control word */ #define TXDESC_CONTROL_SIZE 0x000007FF #define TXDESC_CONTROL_OVERRIDE (1 
<< 26) #define TXDESC_CONTROL_HUGE (1 << 27) #define TXDESC_CONTROL_PAD (1 << 28) #define TXDESC_CONTROL_CRC (1 << 29) #define TXDESC_CONTROL_LAST (1 << 30) #define TXDESC_CONTROL_INT (1 << 31) /* * Structure of a TX/RX descriptors and RX status */ struct txrx_desc_t { __le32 packet; __le32 control; }; struct rx_status_t { __le32 statusinfo; __le32 statushashcrc; }; /* * Device driver data structure */ struct netdata_local { struct platform_device *pdev; struct net_device *ndev; struct device_node *phy_node; spinlock_t lock; void __iomem *net_base; u32 msg_enable; unsigned int skblen[ENET_TX_DESC]; unsigned int last_tx_idx; unsigned int num_used_tx_buffs; struct mii_bus *mii_bus; struct clk *clk; dma_addr_t dma_buff_base_p; void *dma_buff_base_v; size_t dma_buff_size; struct txrx_desc_t *tx_desc_v; u32 *tx_stat_v; void *tx_buff_v; struct txrx_desc_t *rx_desc_v; struct rx_status_t *rx_stat_v; void *rx_buff_v; int link; int speed; int duplex; struct napi_struct napi; }; /* * MAC support functions */ static void __lpc_set_mac(struct netdata_local *pldat, const u8 *mac) { u32 tmp; /* Set station address */ tmp = mac[0] | ((u32)mac[1] << 8); writel(tmp, LPC_ENET_SA2(pldat->net_base)); tmp = mac[2] | ((u32)mac[3] << 8); writel(tmp, LPC_ENET_SA1(pldat->net_base)); tmp = mac[4] | ((u32)mac[5] << 8); writel(tmp, LPC_ENET_SA0(pldat->net_base)); netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac); } static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac) { u32 tmp; /* Get station address */ tmp = readl(LPC_ENET_SA2(pldat->net_base)); mac[0] = tmp & 0xFF; mac[1] = tmp >> 8; tmp = readl(LPC_ENET_SA1(pldat->net_base)); mac[2] = tmp & 0xFF; mac[3] = tmp >> 8; tmp = readl(LPC_ENET_SA0(pldat->net_base)); mac[4] = tmp & 0xFF; mac[5] = tmp >> 8; } static void __lpc_params_setup(struct netdata_local *pldat) { u32 tmp; if (pldat->duplex == DUPLEX_FULL) { tmp = readl(LPC_ENET_MAC2(pldat->net_base)); tmp |= LPC_MAC2_FULL_DUPLEX; writel(tmp, LPC_ENET_MAC2(pldat->net_base)); tmp = readl(LPC_ENET_COMMAND(pldat->net_base)); tmp |= LPC_COMMAND_FULLDUPLEX; writel(tmp, LPC_ENET_COMMAND(pldat->net_base)); writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base)); } else { tmp = readl(LPC_ENET_MAC2(pldat->net_base)); tmp &= ~LPC_MAC2_FULL_DUPLEX; writel(tmp, LPC_ENET_MAC2(pldat->net_base)); tmp = readl(LPC_ENET_COMMAND(pldat->net_base)); tmp &= ~LPC_COMMAND_FULLDUPLEX; writel(tmp, LPC_ENET_COMMAND(pldat->net_base)); writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base)); } if (pldat->speed == SPEED_100) writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base)); else writel(0, LPC_ENET_SUPP(pldat->net_base)); } static void __lpc_eth_reset(struct netdata_local *pldat) { /* Reset all MAC logic */ writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX | LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET | LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base)); writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET | LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base)); } static int __lpc_mii_mngt_reset(struct netdata_local *pldat) { /* Reset MII management hardware */ writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base)); /* Setup MII clock to slowest rate with a /28 divider */ writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28), LPC_ENET_MCFG(pldat->net_base)); return 0; } static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat) { phys_addr_t phaddr; phaddr = addr - pldat->dma_buff_base_v; phaddr += pldat->dma_buff_base_p; return phaddr; } static void 
lpc_eth_enable_int(void __iomem *regbase) { writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN), LPC_ENET_INTENABLE(regbase)); } static void lpc_eth_disable_int(void __iomem *regbase) { writel(0, LPC_ENET_INTENABLE(regbase)); } /* Setup TX/RX descriptors */ static void __lpc_txrx_desc_setup(struct netdata_local *pldat) { u32 *ptxstat; void *tbuff; int i; struct txrx_desc_t *ptxrxdesc; struct rx_status_t *prxstat; tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16); /* Setup TX descriptors, status, and buffers */ pldat->tx_desc_v = tbuff; tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC; pldat->tx_stat_v = tbuff; tbuff += sizeof(u32) * ENET_TX_DESC; tbuff = PTR_ALIGN(tbuff, 16); pldat->tx_buff_v = tbuff; tbuff += ENET_MAXF_SIZE * ENET_TX_DESC; /* Setup RX descriptors, status, and buffers */ pldat->rx_desc_v = tbuff; tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC; tbuff = PTR_ALIGN(tbuff, 16); pldat->rx_stat_v = tbuff; tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC; tbuff = PTR_ALIGN(tbuff, 16); pldat->rx_buff_v = tbuff; tbuff += ENET_MAXF_SIZE * ENET_RX_DESC; /* Map the TX descriptors to the TX buffers in hardware */ for (i = 0; i < ENET_TX_DESC; i++) { ptxstat = &pldat->tx_stat_v[i]; ptxrxdesc = &pldat->tx_desc_v[i]; ptxrxdesc->packet = __va_to_pa( pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat); ptxrxdesc->control = 0; *ptxstat = 0; } /* Map the RX descriptors to the RX buffers in hardware */ for (i = 0; i < ENET_RX_DESC; i++) { prxstat = &pldat->rx_stat_v[i]; ptxrxdesc = &pldat->rx_desc_v[i]; ptxrxdesc->packet = __va_to_pa( pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat); ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1); prxstat->statusinfo = 0; prxstat->statushashcrc = 0; } /* Setup base addresses in hardware to point to buffers and * descriptors */ writel((ENET_TX_DESC - 1), LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base)); writel(__va_to_pa(pldat->tx_desc_v, pldat), LPC_ENET_TXDESCRIPTOR(pldat->net_base)); writel(__va_to_pa(pldat->tx_stat_v, pldat), LPC_ENET_TXSTATUS(pldat->net_base)); writel((ENET_RX_DESC - 1), LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base)); writel(__va_to_pa(pldat->rx_desc_v, pldat), LPC_ENET_RXDESCRIPTOR(pldat->net_base)); writel(__va_to_pa(pldat->rx_stat_v, pldat), LPC_ENET_RXSTATUS(pldat->net_base)); } static void __lpc_eth_init(struct netdata_local *pldat) { u32 tmp; /* Disable controller and reset */ tmp = readl(LPC_ENET_COMMAND(pldat->net_base)); tmp &= ~LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE; writel(tmp, LPC_ENET_COMMAND(pldat->net_base)); tmp = readl(LPC_ENET_MAC1(pldat->net_base)); tmp &= ~LPC_MAC1_RECV_ENABLE; writel(tmp, LPC_ENET_MAC1(pldat->net_base)); /* Initial MAC setup */ writel(LPC_MAC1_PASS_ALL_RX_FRAMES, LPC_ENET_MAC1(pldat->net_base)); writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE), LPC_ENET_MAC2(pldat->net_base)); writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base)); /* Collision window, gap */ writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) | LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)), LPC_ENET_CLRT(pldat->net_base)); writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base)); if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII) writel(LPC_COMMAND_PASSRUNTFRAME, LPC_ENET_COMMAND(pldat->net_base)); else { writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII), LPC_ENET_COMMAND(pldat->net_base)); writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base)); } __lpc_params_setup(pldat); /* Setup TX and RX descriptors */ __lpc_txrx_desc_setup(pldat); /* Setup packet filtering */ 
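/* Default filter: broadcast plus perfect station-address match; promiscuous and multicast-hash bits are added later by lpc_eth_set_multicast_list(). */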
writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT), LPC_ENET_RXFILTER_CTRL(pldat->net_base)); /* Get the next TX buffer output index */ pldat->num_used_tx_buffs = 0; pldat->last_tx_idx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); /* Clear and enable interrupts */ writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base)); smp_wmb(); lpc_eth_enable_int(pldat->net_base); /* Enable controller */ tmp = readl(LPC_ENET_COMMAND(pldat->net_base)); tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE; writel(tmp, LPC_ENET_COMMAND(pldat->net_base)); tmp = readl(LPC_ENET_MAC1(pldat->net_base)); tmp |= LPC_MAC1_RECV_ENABLE; writel(tmp, LPC_ENET_MAC1(pldat->net_base)); } static void __lpc_eth_shutdown(struct netdata_local *pldat) { /* Reset ethernet and power down PHY */ __lpc_eth_reset(pldat); writel(0, LPC_ENET_MAC1(pldat->net_base)); writel(0, LPC_ENET_MAC2(pldat->net_base)); } /* * MAC<--->PHY support functions */ static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg) { struct netdata_local *pldat = bus->priv; unsigned long timeout = jiffies + msecs_to_jiffies(100); int lps; writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base)); writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base)); /* Wait for unbusy status */ while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) { if (time_after(jiffies, timeout)) return -EIO; cpu_relax(); } lps = readl(LPC_ENET_MRDD(pldat->net_base)); writel(0, LPC_ENET_MCMD(pldat->net_base)); return lps; } static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg, u16 phydata) { struct netdata_local *pldat = bus->priv; unsigned long timeout = jiffies + msecs_to_jiffies(100); writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base)); writel(phydata, LPC_ENET_MWTD(pldat->net_base)); /* Wait for completion */ while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) { if (time_after(jiffies, timeout)) return -EIO; cpu_relax(); } return 0; } static int lpc_mdio_reset(struct mii_bus *bus) { return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv); } static void lpc_handle_link_change(struct net_device *ndev) { struct netdata_local *pldat = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; unsigned long flags; bool status_change = false; spin_lock_irqsave(&pldat->lock, flags); if (phydev->link) { if ((pldat->speed != phydev->speed) || (pldat->duplex != phydev->duplex)) { pldat->speed = phydev->speed; pldat->duplex = phydev->duplex; status_change = true; } } if (phydev->link != pldat->link) { if (!phydev->link) { pldat->speed = 0; pldat->duplex = -1; } pldat->link = phydev->link; status_change = true; } spin_unlock_irqrestore(&pldat->lock, flags); if (status_change) __lpc_params_setup(pldat); } static int lpc_mii_probe(struct net_device *ndev) { struct netdata_local *pldat = netdev_priv(ndev); struct phy_device *phydev; /* Attach to the PHY */ if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII) netdev_info(ndev, "using MII interface\n"); else netdev_info(ndev, "using RMII interface\n"); if (pldat->phy_node) phydev = of_phy_find_device(pldat->phy_node); else phydev = phy_find_first(pldat->mii_bus); if (!phydev) { netdev_err(ndev, "no PHY found\n"); return -ENODEV; } phydev = phy_connect(ndev, phydev_name(phydev), &lpc_handle_link_change, lpc_phy_interface_mode(&pldat->pdev->dev)); if (IS_ERR(phydev)) { netdev_err(ndev, "Could not attach to PHY\n"); return PTR_ERR(phydev); } phy_set_max_speed(phydev, SPEED_100); pldat->link = 0; pldat->speed = 0; pldat->duplex = -1; 
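/* Start from "link down" defaults so the first lpc_handle_link_change() call reprograms the MAC parameters */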
phy_attached_info(phydev); return 0; } static int lpc_mii_init(struct netdata_local *pldat) { struct device_node *node; int err = -ENXIO; pldat->mii_bus = mdiobus_alloc(); if (!pldat->mii_bus) { err = -ENOMEM; goto err_out; } /* Setup MII mode */ if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII) writel(LPC_COMMAND_PASSRUNTFRAME, LPC_ENET_COMMAND(pldat->net_base)); else { writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII), LPC_ENET_COMMAND(pldat->net_base)); writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base)); } pldat->mii_bus->name = "lpc_mii_bus"; pldat->mii_bus->read = &lpc_mdio_read; pldat->mii_bus->write = &lpc_mdio_write; pldat->mii_bus->reset = &lpc_mdio_reset; snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pldat->pdev->name, pldat->pdev->id); pldat->mii_bus->priv = pldat; pldat->mii_bus->parent = &pldat->pdev->dev; node = of_get_child_by_name(pldat->pdev->dev.of_node, "mdio"); err = of_mdiobus_register(pldat->mii_bus, node); of_node_put(node); if (err) goto err_out_unregister_bus; err = lpc_mii_probe(pldat->ndev); if (err) goto err_out_unregister_bus; return 0; err_out_unregister_bus: mdiobus_unregister(pldat->mii_bus); mdiobus_free(pldat->mii_bus); err_out: return err; } static void __lpc_handle_xmit(struct net_device *ndev) { struct netdata_local *pldat = netdev_priv(ndev); u32 txcidx, *ptxstat, txstat; txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); while (pldat->last_tx_idx != txcidx) { unsigned int skblen = pldat->skblen[pldat->last_tx_idx]; /* A buffer is available, get buffer status */ ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx]; txstat = *ptxstat; /* Next buffer and decrement used buffer counter */ pldat->num_used_tx_buffs--; pldat->last_tx_idx++; if (pldat->last_tx_idx >= ENET_TX_DESC) pldat->last_tx_idx = 0; /* Update collision counter */ ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat); /* Any errors occurred? */ if (txstat & TXSTATUS_ERROR) { if (txstat & TXSTATUS_UNDERRUN) { /* FIFO underrun */ ndev->stats.tx_fifo_errors++; } if (txstat & TXSTATUS_LATECOLL) { /* Late collision */ ndev->stats.tx_aborted_errors++; } if (txstat & TXSTATUS_EXCESSCOLL) { /* Excessive collision */ ndev->stats.tx_aborted_errors++; } if (txstat & TXSTATUS_EXCESSDEFER) { /* Defer limit */ ndev->stats.tx_aborted_errors++; } ndev->stats.tx_errors++; } else { /* Update stats */ ndev->stats.tx_packets++; ndev->stats.tx_bytes += skblen; } txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); } if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) { if (netif_queue_stopped(ndev)) netif_wake_queue(ndev); } } static int __lpc_handle_recv(struct net_device *ndev, int budget) { struct netdata_local *pldat = netdev_priv(ndev); struct sk_buff *skb; u32 rxconsidx, len, ethst; struct rx_status_t *prxstat; int rx_done = 0; /* Get the current RX buffer indexes */ rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base)); while (rx_done < budget && rxconsidx != readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) { /* Get pointer to receive status */ prxstat = &pldat->rx_stat_v[rxconsidx]; len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1; /* Status error? 
*/ ethst = prxstat->statusinfo; if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) == (RXSTATUS_ERROR | RXSTATUS_RANGE)) ethst &= ~RXSTATUS_ERROR; if (ethst & RXSTATUS_ERROR) { int si = prxstat->statusinfo; /* Check statuses */ if (si & RXSTATUS_OVERRUN) { /* Overrun error */ ndev->stats.rx_fifo_errors++; } else if (si & RXSTATUS_CRC) { /* CRC error */ ndev->stats.rx_crc_errors++; } else if (si & RXSTATUS_LENGTH) { /* Length error */ ndev->stats.rx_length_errors++; } else if (si & RXSTATUS_ERROR) { /* Other error */ ndev->stats.rx_length_errors++; } ndev->stats.rx_errors++; } else { /* Packet is good */ skb = dev_alloc_skb(len); if (!skb) { ndev->stats.rx_dropped++; } else { /* Copy packet from buffer */ skb_put_data(skb, pldat->rx_buff_v + rxconsidx * ENET_MAXF_SIZE, len); /* Pass to upper layer */ skb->protocol = eth_type_trans(skb, ndev); netif_receive_skb(skb); ndev->stats.rx_packets++; ndev->stats.rx_bytes += len; } } /* Increment consume index */ rxconsidx = rxconsidx + 1; if (rxconsidx >= ENET_RX_DESC) rxconsidx = 0; writel(rxconsidx, LPC_ENET_RXCONSUMEINDEX(pldat->net_base)); rx_done++; } return rx_done; } static int lpc_eth_poll(struct napi_struct *napi, int budget) { struct netdata_local *pldat = container_of(napi, struct netdata_local, napi); struct net_device *ndev = pldat->ndev; int rx_done = 0; struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0); __netif_tx_lock(txq, smp_processor_id()); __lpc_handle_xmit(ndev); __netif_tx_unlock(txq); rx_done = __lpc_handle_recv(ndev, budget); if (rx_done < budget) { napi_complete_done(napi, rx_done); lpc_eth_enable_int(pldat->net_base); } return rx_done; } static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id) { struct net_device *ndev = dev_id; struct netdata_local *pldat = netdev_priv(ndev); u32 tmp; spin_lock(&pldat->lock); tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base)); /* Clear interrupts */ writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base)); lpc_eth_disable_int(pldat->net_base); if (likely(napi_schedule_prep(&pldat->napi))) __napi_schedule(&pldat->napi); spin_unlock(&pldat->lock); return IRQ_HANDLED; } static int lpc_eth_close(struct net_device *ndev) { unsigned long flags; struct netdata_local *pldat = netdev_priv(ndev); if (netif_msg_ifdown(pldat)) dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name); napi_disable(&pldat->napi); netif_stop_queue(ndev); spin_lock_irqsave(&pldat->lock, flags); __lpc_eth_reset(pldat); netif_carrier_off(ndev); writel(0, LPC_ENET_MAC1(pldat->net_base)); writel(0, LPC_ENET_MAC2(pldat->net_base)); spin_unlock_irqrestore(&pldat->lock, flags); if (ndev->phydev) phy_stop(ndev->phydev); clk_disable_unprepare(pldat->clk); return 0; } static netdev_tx_t lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct netdata_local *pldat = netdev_priv(ndev); u32 len, txidx; u32 *ptxstat; struct txrx_desc_t *ptxrxdesc; len = skb->len; spin_lock_irq(&pldat->lock); if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) { /* This function should never be called when there are no * buffers */ netif_stop_queue(ndev); spin_unlock_irq(&pldat->lock); WARN(1, "BUG! 
TX request when no free TX buffers!\n"); return NETDEV_TX_BUSY; } /* Get the next TX descriptor index */ txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base)); /* Setup control for the transfer */ ptxstat = &pldat->tx_stat_v[txidx]; *ptxstat = 0; ptxrxdesc = &pldat->tx_desc_v[txidx]; ptxrxdesc->control = (len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT; /* Copy data to the DMA buffer */ memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len); /* Save the buffer and increment the buffer counter */ pldat->skblen[txidx] = len; pldat->num_used_tx_buffs++; /* Start transmit */ txidx++; if (txidx >= ENET_TX_DESC) txidx = 0; writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base)); /* Stop queue if no more TX buffers */ if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) netif_stop_queue(ndev); spin_unlock_irq(&pldat->lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } static int lpc_set_mac_address(struct net_device *ndev, void *p) { struct sockaddr *addr = p; struct netdata_local *pldat = netdev_priv(ndev); unsigned long flags; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(ndev, addr->sa_data); spin_lock_irqsave(&pldat->lock, flags); /* Set station address */ __lpc_set_mac(pldat, ndev->dev_addr); spin_unlock_irqrestore(&pldat->lock, flags); return 0; } static void lpc_eth_set_multicast_list(struct net_device *ndev) { struct netdata_local *pldat = netdev_priv(ndev); struct netdev_hw_addr_list *mcptr = &ndev->mc; struct netdev_hw_addr *ha; u32 tmp32, hash_val, hashlo, hashhi; unsigned long flags; spin_lock_irqsave(&pldat->lock, flags); /* Set station address */ __lpc_set_mac(pldat, ndev->dev_addr); tmp32 = LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT; if (ndev->flags & IFF_PROMISC) tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST | LPC_RXFLTRW_ACCEPTUMULTICAST; if (ndev->flags & IFF_ALLMULTI) tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST; if (netdev_hw_addr_list_count(mcptr)) tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH; writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base)); /* Set initial hash table */ hashlo = 0x0; hashhi = 0x0; /* 64 bits : multicast address in hash table */ netdev_hw_addr_list_for_each(ha, mcptr) { hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F; if (hash_val >= 32) hashhi |= 1 << (hash_val - 32); else hashlo |= 1 << hash_val; } writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base)); writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base)); spin_unlock_irqrestore(&pldat->lock, flags); } static int lpc_eth_open(struct net_device *ndev) { struct netdata_local *pldat = netdev_priv(ndev); int ret; if (netif_msg_ifup(pldat)) dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name); ret = clk_prepare_enable(pldat->clk); if (ret) return ret; /* Suspended PHY makes LPC ethernet core block, so resume now */ phy_resume(ndev->phydev); /* Reset and initialize */ __lpc_eth_reset(pldat); __lpc_eth_init(pldat); /* schedule a link state check */ phy_start(ndev->phydev); netif_start_queue(ndev); napi_enable(&pldat->napi); return 0; } /* * Ethtool ops */ static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { strscpy(info->driver, MODNAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); strscpy(info->bus_info, dev_name(ndev->dev.parent), sizeof(info->bus_info)); } static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev) { struct netdata_local *pldat = netdev_priv(ndev); return pldat->msg_enable; } static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level) { struct 
netdata_local *pldat = netdev_priv(ndev); pldat->msg_enable = level; } static const struct ethtool_ops lpc_eth_ethtool_ops = { .get_drvinfo = lpc_eth_ethtool_getdrvinfo, .get_msglevel = lpc_eth_ethtool_getmsglevel, .set_msglevel = lpc_eth_ethtool_setmsglevel, .get_link = ethtool_op_get_link, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct net_device_ops lpc_netdev_ops = { .ndo_open = lpc_eth_open, .ndo_stop = lpc_eth_close, .ndo_start_xmit = lpc_eth_hard_start_xmit, .ndo_set_rx_mode = lpc_eth_set_multicast_list, .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_mac_address = lpc_set_mac_address, .ndo_validate_addr = eth_validate_addr, }; static int lpc_eth_drv_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct netdata_local *pldat; struct net_device *ndev; dma_addr_t dma_handle; struct resource *res; u8 addr[ETH_ALEN]; int irq, ret; /* Setup network interface for RMII or MII mode */ lpc32xx_set_phy_interface_mode(lpc_phy_interface_mode(dev)); /* Get platform resources */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!res || irq < 0) { dev_err(dev, "error getting resources.\n"); ret = -ENXIO; goto err_exit; } /* Allocate net driver data structure */ ndev = alloc_etherdev(sizeof(struct netdata_local)); if (!ndev) { dev_err(dev, "could not allocate device.\n"); ret = -ENOMEM; goto err_exit; } SET_NETDEV_DEV(ndev, dev); pldat = netdev_priv(ndev); pldat->pdev = pdev; pldat->ndev = ndev; spin_lock_init(&pldat->lock); /* Save resources */ ndev->irq = irq; /* Get clock for the device */ pldat->clk = clk_get(dev, NULL); if (IS_ERR(pldat->clk)) { dev_err(dev, "error getting clock.\n"); ret = PTR_ERR(pldat->clk); goto err_out_free_dev; } /* Enable network clock */ ret = clk_prepare_enable(pldat->clk); if (ret) goto err_out_clk_put; /* Map IO space */ pldat->net_base = ioremap(res->start, resource_size(res)); if (!pldat->net_base) { dev_err(dev, "failed to map registers\n"); ret = -ENOMEM; goto err_out_disable_clocks; } ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0, ndev->name, ndev); if (ret) { dev_err(dev, "error requesting interrupt.\n"); goto err_out_iounmap; } /* Setup driver functions */ ndev->netdev_ops = &lpc_netdev_ops; ndev->ethtool_ops = &lpc_eth_ethtool_ops; ndev->watchdog_timeo = msecs_to_jiffies(2500); /* Get size of DMA buffers/descriptors region */ pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE + sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t)); if (use_iram_for_net(dev)) { if (pldat->dma_buff_size > lpc32xx_return_iram(&pldat->dma_buff_base_v, &dma_handle)) { pldat->dma_buff_base_v = NULL; pldat->dma_buff_size = 0; netdev_err(ndev, "IRAM not big enough for net buffers, using SDRAM instead.\n"); } } if (pldat->dma_buff_base_v == NULL) { ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (ret) goto err_out_free_irq; pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size); /* Allocate a chunk of memory for the DMA ethernet buffers * and descriptors */ pldat->dma_buff_base_v = dma_alloc_coherent(dev, pldat->dma_buff_size, &dma_handle, GFP_KERNEL); if (pldat->dma_buff_base_v == NULL) { ret = -ENOMEM; goto err_out_free_irq; } } pldat->dma_buff_base_p = dma_handle; netdev_dbg(ndev, "IO address space :%pR\n", res); netdev_dbg(ndev, "IO address size :%zd\n", (size_t)resource_size(res)); netdev_dbg(ndev, "IO address (mapped) :0x%p\n", pldat->net_base); 
netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq); netdev_dbg(ndev, "DMA buffer size :%zd\n", pldat->dma_buff_size); netdev_dbg(ndev, "DMA buffer P address :%pad\n", &pldat->dma_buff_base_p); netdev_dbg(ndev, "DMA buffer V address :0x%p\n", pldat->dma_buff_base_v); pldat->phy_node = of_parse_phandle(np, "phy-handle", 0); /* Get MAC address from current HW setting (POR state is all zeros) */ __lpc_get_mac(pldat, addr); eth_hw_addr_set(ndev, addr); if (!is_valid_ether_addr(ndev->dev_addr)) { of_get_ethdev_address(np, ndev); } if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); /* then shut everything down to save power */ __lpc_eth_shutdown(pldat); /* Set default parameters */ pldat->msg_enable = NETIF_MSG_LINK; /* Force an MII interface reset and clock setup */ __lpc_mii_mngt_reset(pldat); /* Force default PHY interface setup in chip, this will probably be * changed by the PHY driver */ pldat->link = 0; pldat->speed = 100; pldat->duplex = DUPLEX_FULL; __lpc_params_setup(pldat); netif_napi_add_weight(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT); ret = register_netdev(ndev); if (ret) { dev_err(dev, "Cannot register net device, aborting.\n"); goto err_out_dma_unmap; } platform_set_drvdata(pdev, ndev); ret = lpc_mii_init(pldat); if (ret) goto err_out_unregister_netdev; netdev_info(ndev, "LPC mac at 0x%08lx irq %d\n", (unsigned long)res->start, ndev->irq); device_init_wakeup(dev, 1); device_set_wakeup_enable(dev, 0); return 0; err_out_unregister_netdev: unregister_netdev(ndev); err_out_dma_unmap: if (!use_iram_for_net(dev) || pldat->dma_buff_size > lpc32xx_return_iram(NULL, NULL)) dma_free_coherent(dev, pldat->dma_buff_size, pldat->dma_buff_base_v, pldat->dma_buff_base_p); err_out_free_irq: free_irq(ndev->irq, ndev); err_out_iounmap: iounmap(pldat->net_base); err_out_disable_clocks: clk_disable_unprepare(pldat->clk); err_out_clk_put: clk_put(pldat->clk); err_out_free_dev: free_netdev(ndev); err_exit: pr_err("%s: not found (%d).\n", MODNAME, ret); return ret; } static int lpc_eth_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct netdata_local *pldat = netdev_priv(ndev); unregister_netdev(ndev); if (!use_iram_for_net(&pldat->pdev->dev) || pldat->dma_buff_size > lpc32xx_return_iram(NULL, NULL)) dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size, pldat->dma_buff_base_v, pldat->dma_buff_base_p); free_irq(ndev->irq, ndev); iounmap(pldat->net_base); mdiobus_unregister(pldat->mii_bus); mdiobus_free(pldat->mii_bus); clk_disable_unprepare(pldat->clk); clk_put(pldat->clk); free_netdev(ndev); return 0; } #ifdef CONFIG_PM static int lpc_eth_drv_suspend(struct platform_device *pdev, pm_message_t state) { struct net_device *ndev = platform_get_drvdata(pdev); struct netdata_local *pldat = netdev_priv(ndev); if (device_may_wakeup(&pdev->dev)) enable_irq_wake(ndev->irq); if (ndev) { if (netif_running(ndev)) { netif_device_detach(ndev); __lpc_eth_shutdown(pldat); clk_disable_unprepare(pldat->clk); /* * Reset again now clock is disable to be sure * EMC_MDC is down */ __lpc_eth_reset(pldat); } } return 0; } static int lpc_eth_drv_resume(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct netdata_local *pldat; int ret; if (device_may_wakeup(&pdev->dev)) disable_irq_wake(ndev->irq); if (ndev) { if (netif_running(ndev)) { pldat = netdev_priv(ndev); /* Enable interface clock */ ret = clk_enable(pldat->clk); if (ret) return ret; /* Reset and initialize */ __lpc_eth_reset(pldat); __lpc_eth_init(pldat); 
netif_device_attach(ndev); } } return 0; } #endif static const struct of_device_id lpc_eth_match[] = { { .compatible = "nxp,lpc-eth" }, { } }; MODULE_DEVICE_TABLE(of, lpc_eth_match); static struct platform_driver lpc_eth_driver = { .probe = lpc_eth_drv_probe, .remove = lpc_eth_drv_remove, #ifdef CONFIG_PM .suspend = lpc_eth_drv_suspend, .resume = lpc_eth_drv_resume, #endif .driver = { .name = MODNAME, .of_match_table = lpc_eth_match, }, }; module_platform_driver(lpc_eth_driver); MODULE_AUTHOR("Kevin Wells <[email protected]>"); MODULE_AUTHOR("Roland Stigge <[email protected]>"); MODULE_DESCRIPTION("LPC Ethernet Driver"); MODULE_LICENSE("GPL");
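/*
 * A stand-alone sketch of the DMA-region sizing done in lpc_eth_drv_probe():
 * one contiguous block holds every descriptor, status word and full-size
 * frame buffer, which __lpc_txrx_desc_setup() then carves up with 16-byte
 * alignment. The struct layouts below mirror the driver; the program itself
 * is illustrative only.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define ENET_MAXF_SIZE	1536
#define ENET_RX_DESC	48
#define ENET_TX_DESC	16

struct txrx_desc { uint32_t packet, control; };			/* 8 bytes */
struct rx_status { uint32_t statusinfo, statushashcrc; };	/* 8 bytes */

int main(void)
{
	size_t size = (ENET_TX_DESC + ENET_RX_DESC) *
		      (ENET_MAXF_SIZE + sizeof(struct txrx_desc) +
		       sizeof(struct rx_status));

	/* (16 + 48) * (1536 + 8 + 8) = 99328 bytes, i.e. about 97 KiB */
	printf("DMA region: %zu bytes\n", size);
	return 0;
}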
linux-master
drivers/net/ethernet/nxp/lpc_eth.c
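/*
 * A minimal sketch of the 64-bit multicast hash used by
 * lpc_eth_set_multicast_list() above: bits 23..28 of the Ethernet CRC select
 * one of 64 buckets split across HASHFILTERL/HASHFILTERH. The CRC value is
 * taken as an argument here; in the driver it comes from the kernel's
 * ether_crc().
 */
#include <stdint.h>

static void hash_set(uint32_t crc, uint32_t *hashlo, uint32_t *hashhi)
{
	unsigned int bucket = (crc >> 23) & 0x3F;	/* 0..63 */

	if (bucket >= 32)
		*hashhi |= 1u << (bucket - 32);
	else
		*hashlo |= 1u << bucket;
}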
/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */ /* Written 1999-2000 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on or derived from this code fall under the GPL and must retain the authorship, copyright and license notice. This file is not a complete program and may only be used when the entire operating system is licensed under the GPL. The author may be reached as [email protected], or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 Support and updates available at http://www.scyld.com/network/sundance.html [link no longer provides useful info -jgarzik] Archives of the mailing list are still available at https://www.beowulf.org/pipermail/netdrivers/ */ #define DRV_NAME "sundance" /* The user-configurable values. These may be modified when a driver module is loaded.*/ static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). Typical is a 64 element hash table based on the Ethernet CRC. */ static const int multicast_filter_limit = 32; /* Set the copy breakpoint for the copy-only-tiny-frames scheme. Setting to > 1518 effectively disables this feature. This chip can receive into offset buffers, so the Alpha does not need a copy-align. */ static int rx_copybreak; static int flowctrl=1; /* media[] specifies the media type the NIC operates at. autosense Autosensing active media. 10mbps_hd 10Mbps half duplex. 10mbps_fd 10Mbps full duplex. 100mbps_hd 100Mbps half duplex. 100mbps_fd 100Mbps full duplex. 0 Autosensing active media. 1 10Mbps half duplex. 2 10Mbps full duplex. 3 100Mbps half duplex. 4 100Mbps full duplex. */ #define MAX_UNITS 8 static char *media[MAX_UNITS]; /* Operational parameters that are set at compile time. */ /* Keep the ring sizes a power of two for compile efficiency. The compiler will convert <unsigned>'%'<2^N> into a bit mask. Making the Tx ring too large decreases the effectiveness of channel bonding and packet priority, and more than 128 requires modifying the Tx error recovery. Large receive rings merely waste memory. */ #define TX_RING_SIZE 32 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */ #define RX_RING_SIZE 64 #define RX_BUDGET 32 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc) #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc) /* Operational parameters that usually are not changed. */ /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (4*HZ) #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ /* Include files, designed to support most kernel versions 2.0.0 and later. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/uaccess.h> #include <asm/processor.h> /* Processor type for cache alignment. 
*/ #include <asm/io.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/dma-mapping.h> #include <linux/crc32.h> #include <linux/ethtool.h> #include <linux/mii.h> MODULE_AUTHOR("Donald Becker <[email protected]>"); MODULE_DESCRIPTION("Sundance Alta Ethernet driver"); MODULE_LICENSE("GPL"); module_param(debug, int, 0); module_param(rx_copybreak, int, 0); module_param_array(media, charp, NULL, 0); module_param(flowctrl, int, 0); MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)"); MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames"); MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]"); /* Theory of Operation I. Board Compatibility This driver is designed for the Sundance Technologies "Alta" ST201 chip. II. Board-specific settings III. Driver operation IIIa. Ring buffers This driver uses two statically allocated fixed-size descriptor lists formed into rings by a branch from the final descriptor to the beginning of the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. Some chips explicitly use only 2^N sized rings, while others use a 'next descriptor' pointer that the driver forms into rings. IIIb/c. Transmit/Receive Structure This driver uses a zero-copy receive and transmit scheme. The driver allocates full frame size skbuffs for the Rx ring buffers at open() time and passes the skb->data field to the chip as receive data buffers. When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is copied to the new skbuff. When the incoming frame is larger, the skbuff is passed directly up the protocol stack. Buffers consumed this way are replaced by newly allocated skbuffs in a later phase of receives. The RX_COPYBREAK value is chosen to trade-off the memory wasted by using a full-sized skbuff for small frames vs. the copying costs of larger frames. New boards are typically used in generously configured machines and the underfilled buffers have negligible impact compared to the benefit of a single allocation size, so the default value of zero results in never copying packets. When copying is done, the cost is usually mitigated by using a combined copy/checksum routine. Copying also preloads the cache, which is most useful with small frames. A subtle aspect of the operation is that the IP header at offset 14 in an ethernet frame isn't longword aligned for further processing. Unaligned buffers are permitted by the Sundance hardware, so frames are received into the skbuff at an offset of "+2", 16-byte aligning the IP header. IIId. Synchronization The driver runs as two independent, single-threaded flows of control. One is the send-packet routine, which enforces single-threaded use by the dev->tbusy flag. The other thread is the interrupt handler, which is single threaded by the hardware and interrupt handling software. The send packet thread has partial control over the Tx ring and 'dev->tbusy' flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next queue slot is empty, it clears the tbusy flag when finished otherwise it sets the 'lp->tx_full' flag. The interrupt handler has exclusive control over the Rx ring and records stats from the Tx ring. After reaping the stats, it marks the Tx queue entry as empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it clears both the tx_full and tbusy flags. IV. Notes IVb. References The Sundance ST201 datasheet, preliminary version. The Kendin KS8723 datasheet, preliminary version. 
The ICplus IP100 datasheet, preliminary version. http://www.scyld.com/expert/100mbps.html http://www.scyld.com/expert/NWay.html IVc. Errata */ /* Work-around for Kendin chip bugs. */ #ifndef CONFIG_SUNDANCE_MMIO #define USE_IO_OPS 1 #endif static const struct pci_device_id sundance_pci_tbl[] = { { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 }, { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 }, { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 }, { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 }, { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, { } }; MODULE_DEVICE_TABLE(pci, sundance_pci_tbl); enum { netdev_io_size = 128 }; struct pci_id_info { const char *name; }; static const struct pci_id_info pci_id_tbl[] = { {"D-Link DFE-550TX FAST Ethernet Adapter"}, {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, {"D-Link DFE-580TX 4 port Server Adapter"}, {"D-Link DFE-530TXS FAST Ethernet Adapter"}, {"D-Link DL10050-based FAST Ethernet Adapter"}, {"Sundance Technology Alta"}, {"IC Plus Corporation IP100A FAST Ethernet Adapter"}, { } /* terminate list. */ }; /* This driver was written to use PCI memory space, however x86-oriented hardware often uses I/O space accesses. */ /* Offsets to the device registers. Unlike software-only systems, device drivers interact with complex hardware. It's not useful to define symbolic names for every register bit in the device. The name can only partially document the semantics and make the driver longer and more difficult to read. In general, only the important configuration values or bits changed multiple times should be defined symbolically. */ enum alta_offsets { DMACtrl = 0x00, TxListPtr = 0x04, TxDMABurstThresh = 0x08, TxDMAUrgentThresh = 0x09, TxDMAPollPeriod = 0x0a, RxDMAStatus = 0x0c, RxListPtr = 0x10, DebugCtrl0 = 0x1a, DebugCtrl1 = 0x1c, RxDMABurstThresh = 0x14, RxDMAUrgentThresh = 0x15, RxDMAPollPeriod = 0x16, LEDCtrl = 0x1a, ASICCtrl = 0x30, EEData = 0x34, EECtrl = 0x36, FlashAddr = 0x40, FlashData = 0x44, WakeEvent = 0x45, TxStatus = 0x46, TxFrameId = 0x47, DownCounter = 0x18, IntrClear = 0x4a, IntrEnable = 0x4c, IntrStatus = 0x4e, MACCtrl0 = 0x50, MACCtrl1 = 0x52, StationAddr = 0x54, MaxFrameSize = 0x5A, RxMode = 0x5c, MIICtrl = 0x5e, MulticastFilter0 = 0x60, MulticastFilter1 = 0x64, RxOctetsLow = 0x68, RxOctetsHigh = 0x6a, TxOctetsLow = 0x6c, TxOctetsHigh = 0x6e, TxFramesOK = 0x70, RxFramesOK = 0x72, StatsCarrierError = 0x74, StatsLateColl = 0x75, StatsMultiColl = 0x76, StatsOneColl = 0x77, StatsTxDefer = 0x78, RxMissed = 0x79, StatsTxXSDefer = 0x7a, StatsTxAbort = 0x7b, StatsBcastTx = 0x7c, StatsBcastRx = 0x7d, StatsMcastTx = 0x7e, StatsMcastRx = 0x7f, /* Aliased and bogus values! */ RxStatus = 0x0c, }; #define ASIC_HI_WORD(x) ((x) + 2) enum ASICCtrl_HiWord_bit { GlobalReset = 0x0001, RxReset = 0x0002, TxReset = 0x0004, DMAReset = 0x0008, FIFOReset = 0x0010, NetworkReset = 0x0020, HostReset = 0x0040, ResetBusy = 0x0400, }; /* Bits in the interrupt status/mask registers. */ enum intr_status_bits { IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008, IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020, IntrDrvRqst=0x0040, StatsMax=0x0080, LinkChange=0x0100, IntrTxDMADone=0x0200, IntrRxDMADone=0x0400, }; /* Bits in the RxMode register. */ enum rx_mode_bits { AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08, AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01, }; /* Bits in MACCtrl. 
*/ enum mac_ctrl0_bits { EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40, EnbFlowCtrl=0x100, EnbPassRxCRC=0x200, }; enum mac_ctrl1_bits { StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080, TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400, RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000, }; /* Bits in WakeEvent register. */ enum wake_event_bits { WakePktEnable = 0x01, MagicPktEnable = 0x02, LinkEventEnable = 0x04, WolEnable = 0x80, }; /* The Rx and Tx buffer descriptors. */ /* Note that using only 32 bit fields simplifies conversion to big-endian architectures. */ struct netdev_desc { __le32 next_desc; __le32 status; struct desc_frag { __le32 addr, length; } frag; }; /* Bits in netdev_desc.status */ enum desc_status_bits { DescOwn=0x8000, DescEndPacket=0x4000, DescEndRing=0x2000, LastFrag=0x80000000, DescIntrOnTx=0x8000, DescIntrOnDMADone=0x80000000, DisableAlign = 0x00000001, }; #define PRIV_ALIGN 15 /* Required alignment mask */ /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment within the structure. */ #define MII_CNT 4 struct netdev_private { /* Descriptor rings first for alignment. */ struct netdev_desc *rx_ring; struct netdev_desc *tx_ring; struct sk_buff* rx_skbuff[RX_RING_SIZE]; struct sk_buff* tx_skbuff[TX_RING_SIZE]; dma_addr_t tx_ring_dma; dma_addr_t rx_ring_dma; struct timer_list timer; /* Media monitoring timer. */ struct net_device *ndev; /* backpointer */ /* ethtool extra stats */ struct { u64 tx_multiple_collisions; u64 tx_single_collisions; u64 tx_late_collisions; u64 tx_deferred; u64 tx_deferred_excessive; u64 tx_aborted; u64 tx_bcasts; u64 rx_bcasts; u64 tx_mcasts; u64 rx_mcasts; } xstats; /* Frequently used values: keep some adjacent for cache effect. */ spinlock_t lock; int msg_enable; int chip_id; unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ unsigned int rx_buf_sz; /* Based on MTU+slack. */ struct netdev_desc *last_tx; /* Last Tx descriptor used. */ unsigned int cur_tx, dirty_tx; /* These values are keep track of the transceiver/media in use. */ unsigned int flowctrl:1; unsigned int default_port:4; /* Last dev->if_port value. */ unsigned int an_enable:1; unsigned int speed; unsigned int wol_enabled:1; /* Wake on LAN enabled */ struct tasklet_struct rx_tasklet; struct tasklet_struct tx_tasklet; int budget; int cur_task; /* Multicast and receive mode. */ spinlock_t mcastlock; /* SMP lock multicast updates. */ u16 mcast_filter[4]; /* MII transceiver section. */ struct mii_if_info mii_if; int mii_preamble_required; unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */ struct pci_dev *pci_dev; void __iomem *base; spinlock_t statlock; }; /* The station address location in the EEPROM. 
*/ #define EEPROM_SA_OFFSET 0x10 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \ IntrDrvRqst | IntrTxDone | StatsMax | \ LinkChange) static int change_mtu(struct net_device *dev, int new_mtu); static int eeprom_read(void __iomem *ioaddr, int location); static int mdio_read(struct net_device *dev, int phy_id, int location); static void mdio_write(struct net_device *dev, int phy_id, int location, int value); static int mdio_wait_link(struct net_device *dev, int wait); static int netdev_open(struct net_device *dev); static void check_duplex(struct net_device *dev); static void netdev_timer(struct timer_list *t); static void tx_timeout(struct net_device *dev, unsigned int txqueue); static void init_ring(struct net_device *dev); static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); static int reset_tx (struct net_device *dev); static irqreturn_t intr_handler(int irq, void *dev_instance); static void rx_poll(struct tasklet_struct *t); static void tx_poll(struct tasklet_struct *t); static void refill_rx (struct net_device *dev); static void netdev_error(struct net_device *dev, int intr_status); static void netdev_error(struct net_device *dev, int intr_status); static void set_rx_mode(struct net_device *dev); static int __set_mac_addr(struct net_device *dev); static int sundance_set_mac_addr(struct net_device *dev, void *data); static struct net_device_stats *get_stats(struct net_device *dev); static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static int netdev_close(struct net_device *dev); static const struct ethtool_ops ethtool_ops; static void sundance_reset(struct net_device *dev, unsigned long reset_cmd) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base + ASICCtrl; int countdown; /* ST201 documentation states ASICCtrl is a 32bit register */ iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr); /* ST201 documentation states reset can take up to 1 ms */ countdown = 10 + 1; while (ioread32 (ioaddr) & (ResetBusy << 16)) { if (--countdown == 0) { printk(KERN_WARNING "%s : reset not completed !!\n", dev->name); break; } udelay(100); } } #ifdef CONFIG_NET_POLL_CONTROLLER static void sundance_poll_controller(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); disable_irq(np->pci_dev->irq); intr_handler(np->pci_dev->irq, dev); enable_irq(np->pci_dev->irq); } #endif static const struct net_device_ops netdev_ops = { .ndo_open = netdev_open, .ndo_stop = netdev_close, .ndo_start_xmit = start_tx, .ndo_get_stats = get_stats, .ndo_set_rx_mode = set_rx_mode, .ndo_eth_ioctl = netdev_ioctl, .ndo_tx_timeout = tx_timeout, .ndo_change_mtu = change_mtu, .ndo_set_mac_address = sundance_set_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = sundance_poll_controller, #endif }; static int sundance_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev; struct netdev_private *np; static int card_idx; int chip_idx = ent->driver_data; int irq; int i; void __iomem *ioaddr; u16 mii_ctl; void *ring_space; dma_addr_t ring_dma; #ifdef USE_IO_OPS int bar = 0; #else int bar = 1; #endif int phy, phy_end, phy_idx = 0; __le16 addr[ETH_ALEN / 2]; if (pci_enable_device(pdev)) return -EIO; pci_set_master(pdev); irq = pdev->irq; dev = alloc_etherdev(sizeof(*np)); if (!dev) return -ENOMEM; SET_NETDEV_DEV(dev, &pdev->dev); if (pci_request_regions(pdev, DRV_NAME)) goto err_out_netdev; ioaddr = pci_iomap(pdev, bar, netdev_io_size); if (!ioaddr) goto 
err_out_res; for (i = 0; i < 3; i++) addr[i] = cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET)); eth_hw_addr_set(dev, (u8 *)addr); np = netdev_priv(dev); np->ndev = dev; np->base = ioaddr; np->pci_dev = pdev; np->chip_id = chip_idx; np->msg_enable = (1 << debug) - 1; spin_lock_init(&np->lock); spin_lock_init(&np->statlock); tasklet_setup(&np->rx_tasklet, rx_poll); tasklet_setup(&np->tx_tasklet, tx_poll); ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma, GFP_KERNEL); if (!ring_space) goto err_out_cleardev; np->tx_ring = (struct netdev_desc *)ring_space; np->tx_ring_dma = ring_dma; ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma, GFP_KERNEL); if (!ring_space) goto err_out_unmap_tx; np->rx_ring = (struct netdev_desc *)ring_space; np->rx_ring_dma = ring_dma; np->mii_if.dev = dev; np->mii_if.mdio_read = mdio_read; np->mii_if.mdio_write = mdio_write; np->mii_if.phy_id_mask = 0x1f; np->mii_if.reg_num_mask = 0x1f; /* The chip-specific entries in the device structure. */ dev->netdev_ops = &netdev_ops; dev->ethtool_ops = &ethtool_ops; dev->watchdog_timeo = TX_TIMEOUT; /* MTU range: 68 - 8191 */ dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = 8191; pci_set_drvdata(pdev, dev); i = register_netdev(dev); if (i) goto err_out_unmap_rx; printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n", dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq); np->phys[0] = 1; /* Default setting */ np->mii_preamble_required++; /* * It seems some phys doesn't deal well with address 0 being accessed * first */ if (sundance_pci_tbl[np->chip_id].device == 0x0200) { phy = 0; phy_end = 31; } else { phy = 1; phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */ } for (; phy <= phy_end && phy_idx < MII_CNT; phy++) { int phyx = phy & 0x1f; int mii_status = mdio_read(dev, phyx, MII_BMSR); if (mii_status != 0xffff && mii_status != 0x0000) { np->phys[phy_idx++] = phyx; np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE); if ((mii_status & 0x0040) == 0) np->mii_preamble_required++; printk(KERN_INFO "%s: MII PHY found at address %d, status " "0x%4.4x advertising %4.4x.\n", dev->name, phyx, mii_status, np->mii_if.advertising); } } np->mii_preamble_required--; if (phy_idx == 0) { printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n", dev->name, ioread32(ioaddr + ASICCtrl)); goto err_out_unregister; } np->mii_if.phy_id = np->phys[0]; /* Parse override configuration */ np->an_enable = 1; if (card_idx < MAX_UNITS) { if (media[card_idx] != NULL) { np->an_enable = 0; if (strcmp (media[card_idx], "100mbps_fd") == 0 || strcmp (media[card_idx], "4") == 0) { np->speed = 100; np->mii_if.full_duplex = 1; } else if (strcmp (media[card_idx], "100mbps_hd") == 0 || strcmp (media[card_idx], "3") == 0) { np->speed = 100; np->mii_if.full_duplex = 0; } else if (strcmp (media[card_idx], "10mbps_fd") == 0 || strcmp (media[card_idx], "2") == 0) { np->speed = 10; np->mii_if.full_duplex = 1; } else if (strcmp (media[card_idx], "10mbps_hd") == 0 || strcmp (media[card_idx], "1") == 0) { np->speed = 10; np->mii_if.full_duplex = 0; } else { np->an_enable = 1; } } if (flowctrl == 1) np->flowctrl = 1; } /* Fibre PHY? 
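Bit 0x80 in ASICCtrl apparently reflects a fibre-media strap: when it is set, the code below turns autonegotiation off and defaults the link to 100 Mbps full duplex.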
*/ if (ioread32 (ioaddr + ASICCtrl) & 0x80) { /* Default 100Mbps Full */ if (np->an_enable) { np->speed = 100; np->mii_if.full_duplex = 1; np->an_enable = 0; } } /* Reset PHY */ mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET); mdelay (300); /* If flow control enabled, we need to advertise it.*/ if (np->flowctrl) mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400); mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART); /* Force media type */ if (!np->an_enable) { mii_ctl = 0; mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0; mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0; mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl); printk (KERN_INFO "Override speed=%d, %s duplex\n", np->speed, np->mii_if.full_duplex ? "Full" : "Half"); } /* Perhaps move the reset here? */ /* Reset the chip to erase previous misconfiguration. */ if (netif_msg_hw(np)) printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl)); sundance_reset(dev, 0x00ff << 16); if (netif_msg_hw(np)) printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl)); card_idx++; return 0; err_out_unregister: unregister_netdev(dev); err_out_unmap_rx: dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); err_out_unmap_tx: dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); err_out_cleardev: pci_iounmap(pdev, ioaddr); err_out_res: pci_release_regions(pdev); err_out_netdev: free_netdev (dev); return -ENODEV; } static int change_mtu(struct net_device *dev, int new_mtu) { if (netif_running(dev)) return -EBUSY; dev->mtu = new_mtu; return 0; } #define eeprom_delay(ee_addr) ioread32(ee_addr) /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */ static int eeprom_read(void __iomem *ioaddr, int location) { int boguscnt = 10000; /* Typical 1900 ticks. */ iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl); do { eeprom_delay(ioaddr + EECtrl); if (! (ioread16(ioaddr + EECtrl) & 0x8000)) { return ioread16(ioaddr + EEData); } } while (--boguscnt > 0); return 0; } /* MII transceiver control section. Read and write the MII registers using software-generated serial MDIO protocol. See the MII specifications or DP83840A data sheet for details. The maximum data clock rate is 2.5 Mhz. The minimum timing is usually met by back-to-back 33Mhz PCI cycles. */ #define mdio_delay() ioread8(mdio_addr) enum mii_reg_bits { MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004, }; #define MDIO_EnbIn (0) #define MDIO_WRITE0 (MDIO_EnbOutput) #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput) /* Generate the preamble required for initial synchronization and a few older transceivers. */ static void mdio_sync(void __iomem *mdio_addr) { int bits = 32; /* Establish sync by sending at least 32 logic ones. */ while (--bits >= 0) { iowrite8(MDIO_WRITE1, mdio_addr); mdio_delay(); iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr); mdio_delay(); } } static int mdio_read(struct net_device *dev, int phy_id, int location) { struct netdev_private *np = netdev_priv(dev); void __iomem *mdio_addr = np->base + MIICtrl; int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; int i, retval = 0; if (np->mii_preamble_required) mdio_sync(mdio_addr); /* Shift the read command bits out. */ for (i = 15; i >= 0; i--) { int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; iowrite8(dataval, mdio_addr); mdio_delay(); iowrite8(dataval | MDIO_ShiftClk, mdio_addr); mdio_delay(); } /* Read the two transition, 16 data, and wire-idle bits. 
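The loop below clocks in 19 bits in total; the trailing shift by one drops the final idle bit and the 0xffff mask strips the two leading turnaround bits, leaving the 16 data bits.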
*/ for (i = 19; i > 0; i--) { iowrite8(MDIO_EnbIn, mdio_addr); mdio_delay(); retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0); iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); mdio_delay(); } return (retval>>1) & 0xffff; } static void mdio_write(struct net_device *dev, int phy_id, int location, int value) { struct netdev_private *np = netdev_priv(dev); void __iomem *mdio_addr = np->base + MIICtrl; int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value; int i; if (np->mii_preamble_required) mdio_sync(mdio_addr); /* Shift the command bits out. */ for (i = 31; i >= 0; i--) { int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; iowrite8(dataval, mdio_addr); mdio_delay(); iowrite8(dataval | MDIO_ShiftClk, mdio_addr); mdio_delay(); } /* Clear out extra bits. */ for (i = 2; i > 0; i--) { iowrite8(MDIO_EnbIn, mdio_addr); mdio_delay(); iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); mdio_delay(); } } static int mdio_wait_link(struct net_device *dev, int wait) { int bmsr; int phy_id; struct netdev_private *np; np = netdev_priv(dev); phy_id = np->phys[0]; do { bmsr = mdio_read(dev, phy_id, MII_BMSR); if (bmsr & 0x0004) return 0; mdelay(1); } while (--wait > 0); return -1; } static int netdev_open(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; const int irq = np->pci_dev->irq; unsigned long flags; int i; sundance_reset(dev, 0x00ff << 16); i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); if (i) return i; if (netif_msg_ifup(np)) printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq); init_ring(dev); iowrite32(np->rx_ring_dma, ioaddr + RxListPtr); /* The Tx list pointer is written as packets are queued. */ /* Initialize other registers. */ __set_mac_addr(dev); #if IS_ENABLED(CONFIG_VLAN_8021Q) iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize); #else iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize); #endif if (dev->mtu > 2047) iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl); /* Configure the PCI bus bursts and FIFO thresholds. */ if (dev->if_port == 0) dev->if_port = np->default_port; spin_lock_init(&np->mcastlock); set_rx_mode(dev); iowrite16(0, ioaddr + IntrEnable); iowrite16(0, ioaddr + DownCounter); /* Set the chip to poll every N*320nsec. */ iowrite8(100, ioaddr + RxDMAPollPeriod); iowrite8(127, ioaddr + TxDMAPollPeriod); /* Fix DFE-580TX packet drop issue */ if (np->pci_dev->revision >= 0x14) iowrite8(0x01, ioaddr + DebugCtrl1); netif_start_queue(dev); spin_lock_irqsave(&np->lock, flags); reset_tx(dev); spin_unlock_irqrestore(&np->lock, flags); iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); /* Disable Wol */ iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent); np->wol_enabled = 0; if (netif_msg_ifup(np)) printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x " "MAC Control %x, %4.4x %4.4x.\n", dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus), ioread32(ioaddr + MACCtrl0), ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0)); /* Set the timer to check for link beat. */ timer_setup(&np->timer, netdev_timer, 0); np->timer.expires = jiffies + 3*HZ; add_timer(&np->timer); /* Enable interrupts by setting the interrupt mask. 
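DEFAULT_INTR unmasks Rx DMA completion, Tx completion, driver requests, statistics overflow, link change and PCI error events; the other status bits remain masked.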
*/ iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); return 0; } static void check_duplex(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); int negotiated = mii_lpa & np->mii_if.advertising; int duplex; /* Force media */ if (!np->an_enable || mii_lpa == 0xffff) { if (np->mii_if.full_duplex) iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex, ioaddr + MACCtrl0); return; } /* Autonegotiation */ duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040; if (np->mii_if.full_duplex != duplex) { np->mii_if.full_duplex = duplex; if (netif_msg_link(np)) printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d " "negotiated capability %4.4x.\n", dev->name, duplex ? "full" : "half", np->phys[0], negotiated); iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0); } } static void netdev_timer(struct timer_list *t) { struct netdev_private *np = from_timer(np, t, timer); struct net_device *dev = np->mii_if.dev; void __iomem *ioaddr = np->base; int next_tick = 10*HZ; if (netif_msg_timer(np)) { printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, " "Tx %x Rx %x.\n", dev->name, ioread16(ioaddr + IntrEnable), ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus)); } check_duplex(dev); np->timer.expires = jiffies + next_tick; add_timer(&np->timer); } static void tx_timeout(struct net_device *dev, unsigned int txqueue) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; unsigned long flag; netif_stop_queue(dev); tasklet_disable_in_atomic(&np->tx_tasklet); iowrite16(0, ioaddr + IntrEnable); printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x " "TxFrameId %2.2x," " resetting...\n", dev->name, ioread8(ioaddr + TxStatus), ioread8(ioaddr + TxFrameId)); { int i; for (i=0; i<TX_RING_SIZE; i++) { printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i, (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)), le32_to_cpu(np->tx_ring[i].next_desc), le32_to_cpu(np->tx_ring[i].status), (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff, le32_to_cpu(np->tx_ring[i].frag.addr), le32_to_cpu(np->tx_ring[i].frag.length)); } printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n", ioread32(np->base + TxListPtr), netif_queue_stopped(dev)); printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n", np->cur_tx, np->cur_tx % TX_RING_SIZE, np->dirty_tx, np->dirty_tx % TX_RING_SIZE); printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx); printk(KERN_DEBUG "cur_task=%d\n", np->cur_task); } spin_lock_irqsave(&np->lock, flag); /* Stop and restart the chip's Tx processes . */ reset_tx(dev); spin_unlock_irqrestore(&np->lock, flag); dev->if_port = 0; netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { netif_wake_queue(dev); } iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); tasklet_enable(&np->tx_tasklet); } /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ static void init_ring(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); int i; np->cur_rx = np->cur_tx = 0; np->dirty_rx = np->dirty_tx = 0; np->cur_task = 0; np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16); /* Initialize all Rx descriptors. 
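The next_desc links form a circular list (the last descriptor points back to the first); buffers are attached in the second loop, so an allocation failure simply leaves the tail of the ring empty for refill_rx() to populate later.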
*/ for (i = 0; i < RX_RING_SIZE; i++) { np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma + ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring)); np->rx_ring[i].status = 0; np->rx_ring[i].frag.length = 0; np->rx_skbuff[i] = NULL; } /* Fill in the Rx buffers. Handle allocation failure gracefully. */ for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2); np->rx_skbuff[i] = skb; if (skb == NULL) break; skb_reserve(skb, 2); /* 16 byte align the IP header. */ np->rx_ring[i].frag.addr = cpu_to_le32( dma_map_single(&np->pci_dev->dev, skb->data, np->rx_buf_sz, DMA_FROM_DEVICE)); if (dma_mapping_error(&np->pci_dev->dev, np->rx_ring[i].frag.addr)) { dev_kfree_skb(skb); np->rx_skbuff[i] = NULL; break; } np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag); } np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); for (i = 0; i < TX_RING_SIZE; i++) { np->tx_skbuff[i] = NULL; np->tx_ring[i].status = 0; } } static void tx_poll(struct tasklet_struct *t) { struct netdev_private *np = from_tasklet(np, t, tx_tasklet); unsigned head = np->cur_task % TX_RING_SIZE; struct netdev_desc *txdesc = &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE]; /* Chain the next pointer */ for (; np->cur_tx - np->cur_task > 0; np->cur_task++) { int entry = np->cur_task % TX_RING_SIZE; txdesc = &np->tx_ring[entry]; if (np->last_tx) { np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma + entry*sizeof(struct netdev_desc)); } np->last_tx = txdesc; } /* Indicate the latest descriptor of tx ring */ txdesc->status |= cpu_to_le32(DescIntrOnTx); if (ioread32 (np->base + TxListPtr) == 0) iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc), np->base + TxListPtr); } static netdev_tx_t start_tx (struct sk_buff *skb, struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); struct netdev_desc *txdesc; unsigned entry; /* Calculate the next Tx descriptor entry. */ entry = np->cur_tx % TX_RING_SIZE; np->tx_skbuff[entry] = skb; txdesc = &np->tx_ring[entry]; txdesc->next_desc = 0; txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign); txdesc->frag.addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE)); if (dma_mapping_error(&np->pci_dev->dev, txdesc->frag.addr)) goto drop_frame; txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag); /* Increment cur_tx before tasklet_schedule() */ np->cur_tx++; mb(); /* Schedule a tx_poll() task */ tasklet_schedule(&np->tx_tasklet); /* On some architectures: explicitly flush cache lines here. 
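The mb() after the cur_tx increment above also ensures the new descriptor contents are visible before tx_poll() chains it and before the queue-full test below runs.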
*/ if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 && !netif_queue_stopped(dev)) { /* do nothing */ } else { netif_stop_queue (dev); } if (netif_msg_tx_queued(np)) { printk (KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", dev->name, np->cur_tx, entry); } return NETDEV_TX_OK; drop_frame: dev_kfree_skb_any(skb); np->tx_skbuff[entry] = NULL; dev->stats.tx_dropped++; return NETDEV_TX_OK; } /* Reset hardware tx and free all of tx buffers */ static int reset_tx (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; struct sk_buff *skb; int i; /* Reset tx logic, TxListPtr will be cleaned */ iowrite16 (TxDisable, ioaddr + MACCtrl1); sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16); /* free all tx skbuff */ for (i = 0; i < TX_RING_SIZE; i++) { np->tx_ring[i].next_desc = 0; skb = np->tx_skbuff[i]; if (skb) { dma_unmap_single(&np->pci_dev->dev, le32_to_cpu(np->tx_ring[i].frag.addr), skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(skb); np->tx_skbuff[i] = NULL; dev->stats.tx_dropped++; } } np->cur_tx = np->dirty_tx = 0; np->cur_task = 0; np->last_tx = NULL; iowrite8(127, ioaddr + TxDMAPollPeriod); iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); return 0; } /* The interrupt handler cleans up after the Tx thread, and schedule a Rx thread work */ static irqreturn_t intr_handler(int irq, void *dev_instance) { struct net_device *dev = (struct net_device *)dev_instance; struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; int hw_frame_id; int tx_cnt; int tx_status; int handled = 0; int i; do { int intr_status = ioread16(ioaddr + IntrStatus); iowrite16(intr_status, ioaddr + IntrStatus); if (netif_msg_intr(np)) printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name, intr_status); if (!(intr_status & DEFAULT_INTR)) break; handled = 1; if (intr_status & (IntrRxDMADone)) { iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone), ioaddr + IntrEnable); if (np->budget < 0) np->budget = RX_BUDGET; tasklet_schedule(&np->rx_tasklet); } if (intr_status & (IntrTxDone | IntrDrvRqst)) { tx_status = ioread16 (ioaddr + TxStatus); for (tx_cnt=32; tx_status & 0x80; --tx_cnt) { if (netif_msg_tx_done(np)) printk ("%s: Transmit status is %2.2x.\n", dev->name, tx_status); if (tx_status & 0x1e) { if (netif_msg_tx_err(np)) printk("%s: Transmit error status %4.4x.\n", dev->name, tx_status); dev->stats.tx_errors++; if (tx_status & 0x10) dev->stats.tx_fifo_errors++; if (tx_status & 0x08) dev->stats.collisions++; if (tx_status & 0x04) dev->stats.tx_fifo_errors++; if (tx_status & 0x02) dev->stats.tx_window_errors++; /* ** This reset has been verified on ** DFE-580TX boards ! [email protected]. */ if (tx_status & 0x10) { /* TxUnderrun */ /* Restart Tx FIFO and transmitter */ sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16); /* No need to reset the Tx pointer here */ } /* Restart the Tx. Need to make sure tx enabled */ i = 10; do { iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1); if (ioread16(ioaddr + MACCtrl1) & TxEnabled) break; mdelay(1); } while (--i); } /* Yup, this is a documentation bug. It cost me *hours*. 
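It appears the status word must be written back as a full 16-bit zero to pop the entry and expose the status of the next completed frame, which is why TxStatus is re-read at the bottom of the loop.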
*/ iowrite16 (0, ioaddr + TxStatus); if (tx_cnt < 0) { iowrite32(5000, ioaddr + DownCounter); break; } tx_status = ioread16 (ioaddr + TxStatus); } hw_frame_id = (tx_status >> 8) & 0xff; } else { hw_frame_id = ioread8(ioaddr + TxFrameId); } if (np->pci_dev->revision >= 0x14) { spin_lock(&np->lock); for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { int entry = np->dirty_tx % TX_RING_SIZE; struct sk_buff *skb; int sw_frame_id; sw_frame_id = (le32_to_cpu( np->tx_ring[entry].status) >> 2) & 0xff; if (sw_frame_id == hw_frame_id && !(le32_to_cpu(np->tx_ring[entry].status) & 0x00010000)) break; if (sw_frame_id == (hw_frame_id + 1) % TX_RING_SIZE) break; skb = np->tx_skbuff[entry]; /* Free the original skb. */ dma_unmap_single(&np->pci_dev->dev, le32_to_cpu(np->tx_ring[entry].frag.addr), skb->len, DMA_TO_DEVICE); dev_consume_skb_irq(np->tx_skbuff[entry]); np->tx_skbuff[entry] = NULL; np->tx_ring[entry].frag.addr = 0; np->tx_ring[entry].frag.length = 0; } spin_unlock(&np->lock); } else { spin_lock(&np->lock); for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { int entry = np->dirty_tx % TX_RING_SIZE; struct sk_buff *skb; if (!(le32_to_cpu(np->tx_ring[entry].status) & 0x00010000)) break; skb = np->tx_skbuff[entry]; /* Free the original skb. */ dma_unmap_single(&np->pci_dev->dev, le32_to_cpu(np->tx_ring[entry].frag.addr), skb->len, DMA_TO_DEVICE); dev_consume_skb_irq(np->tx_skbuff[entry]); np->tx_skbuff[entry] = NULL; np->tx_ring[entry].frag.addr = 0; np->tx_ring[entry].frag.length = 0; } spin_unlock(&np->lock); } if (netif_queue_stopped(dev) && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { /* The ring is no longer full, clear busy flag. */ netif_wake_queue (dev); } /* Abnormal error summary/uncommon events handlers. */ if (intr_status & (IntrPCIErr | LinkChange | StatsMax)) netdev_error(dev, intr_status); } while (0); if (netif_msg_intr(np)) printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", dev->name, ioread16(ioaddr + IntrStatus)); return IRQ_RETVAL(handled); } static void rx_poll(struct tasklet_struct *t) { struct netdev_private *np = from_tasklet(np, t, rx_tasklet); struct net_device *dev = np->ndev; int entry = np->cur_rx % RX_RING_SIZE; int boguscnt = np->budget; void __iomem *ioaddr = np->base; int received = 0; /* If EOP is set on the next entry, it's a new packet. Send it up. */ while (1) { struct netdev_desc *desc = &(np->rx_ring[entry]); u32 frame_status = le32_to_cpu(desc->status); int pkt_len; if (--boguscnt < 0) { goto not_done; } if (!(frame_status & DescOwn)) break; pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */ if (netif_msg_rx_status(np)) printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", frame_status); if (frame_status & 0x001f4000) { /* There was a error. */ if (netif_msg_rx_err(np)) printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n", frame_status); dev->stats.rx_errors++; if (frame_status & 0x00100000) dev->stats.rx_length_errors++; if (frame_status & 0x00010000) dev->stats.rx_fifo_errors++; if (frame_status & 0x00060000) dev->stats.rx_frame_errors++; if (frame_status & 0x00080000) dev->stats.rx_crc_errors++; if (frame_status & 0x00100000) { printk(KERN_WARNING "%s: Oversized Ethernet frame," " status %8.8x.\n", dev->name, frame_status); } } else { struct sk_buff *skb; #ifndef final_version if (netif_msg_rx_status(np)) printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d" ", bogus_cnt %d.\n", pkt_len, boguscnt); #endif /* Check if the packet is long enough to accept without copying to a minimally-sized skbuff. 
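Frames shorter than rx_copybreak are copied into a freshly allocated skb so the DMA-mapped ring buffer can stay in place; larger frames are passed up in the original buffer and the ring slot is refilled later by refill_rx().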
*/ if (pkt_len < rx_copybreak && (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { skb_reserve(skb, 2); /* 16 byte align the IP header */ dma_sync_single_for_cpu(&np->pci_dev->dev, le32_to_cpu(desc->frag.addr), np->rx_buf_sz, DMA_FROM_DEVICE); skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); dma_sync_single_for_device(&np->pci_dev->dev, le32_to_cpu(desc->frag.addr), np->rx_buf_sz, DMA_FROM_DEVICE); skb_put(skb, pkt_len); } else { dma_unmap_single(&np->pci_dev->dev, le32_to_cpu(desc->frag.addr), np->rx_buf_sz, DMA_FROM_DEVICE); skb_put(skb = np->rx_skbuff[entry], pkt_len); np->rx_skbuff[entry] = NULL; } skb->protocol = eth_type_trans(skb, dev); /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */ netif_rx(skb); } entry = (entry + 1) % RX_RING_SIZE; received++; } np->cur_rx = entry; refill_rx (dev); np->budget -= received; iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); return; not_done: np->cur_rx = entry; refill_rx (dev); if (!received) received = 1; np->budget -= received; if (np->budget <= 0) np->budget = RX_BUDGET; tasklet_schedule(&np->rx_tasklet); } static void refill_rx (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); int entry; /* Refill the Rx ring buffers. */ for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0; np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) { struct sk_buff *skb; entry = np->dirty_rx % RX_RING_SIZE; if (np->rx_skbuff[entry] == NULL) { skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2); np->rx_skbuff[entry] = skb; if (skb == NULL) break; /* Better luck next round. */ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ np->rx_ring[entry].frag.addr = cpu_to_le32( dma_map_single(&np->pci_dev->dev, skb->data, np->rx_buf_sz, DMA_FROM_DEVICE)); if (dma_mapping_error(&np->pci_dev->dev, np->rx_ring[entry].frag.addr)) { dev_kfree_skb_irq(skb); np->rx_skbuff[entry] = NULL; break; } } /* Perhaps we need not reset this field. */ np->rx_ring[entry].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag); np->rx_ring[entry].status = 0; } } static void netdev_error(struct net_device *dev, int intr_status) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; u16 mii_ctl, mii_advertise, mii_lpa; int speed; if (intr_status & LinkChange) { if (mdio_wait_link(dev, 10) == 0) { printk(KERN_INFO "%s: Link up\n", dev->name); if (np->an_enable) { mii_advertise = mdio_read(dev, np->phys[0], MII_ADVERTISE); mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); mii_advertise &= mii_lpa; printk(KERN_INFO "%s: Link changed: ", dev->name); if (mii_advertise & ADVERTISE_100FULL) { np->speed = 100; printk("100Mbps, full duplex\n"); } else if (mii_advertise & ADVERTISE_100HALF) { np->speed = 100; printk("100Mbps, half duplex\n"); } else if (mii_advertise & ADVERTISE_10FULL) { np->speed = 10; printk("10Mbps, full duplex\n"); } else if (mii_advertise & ADVERTISE_10HALF) { np->speed = 10; printk("10Mbps, half duplex\n"); } else printk("\n"); } else { mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR); speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10; np->speed = speed; printk(KERN_INFO "%s: Link changed: %dMbps ,", dev->name, speed); printk("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ? 
"full" : "half"); } check_duplex(dev); if (np->flowctrl && np->mii_if.full_duplex) { iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200, ioaddr + MulticastFilter1+2); iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl, ioaddr + MACCtrl0); } netif_carrier_on(dev); } else { printk(KERN_INFO "%s: Link down\n", dev->name); netif_carrier_off(dev); } } if (intr_status & StatsMax) { get_stats(dev); } if (intr_status & IntrPCIErr) { printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n", dev->name, intr_status); /* We must do a global reset of DMA to continue. */ } } static struct net_device_stats *get_stats(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; unsigned long flags; u8 late_coll, single_coll, mult_coll; spin_lock_irqsave(&np->statlock, flags); /* The chip only need report frame silently dropped. */ dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed); dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK); dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK); dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError); mult_coll = ioread8(ioaddr + StatsMultiColl); np->xstats.tx_multiple_collisions += mult_coll; single_coll = ioread8(ioaddr + StatsOneColl); np->xstats.tx_single_collisions += single_coll; late_coll = ioread8(ioaddr + StatsLateColl); np->xstats.tx_late_collisions += late_coll; dev->stats.collisions += mult_coll + single_coll + late_coll; np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer); np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer); np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort); np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx); np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx); np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx); np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx); dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow); dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16; dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow); dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16; spin_unlock_irqrestore(&np->statlock, flags); return &dev->stats; } static void set_rx_mode(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; u16 mc_filter[4]; /* Multicast hash filter */ u32 rx_mode; int i; if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ memset(mc_filter, 0xff, sizeof(mc_filter)); rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys; } else if ((netdev_mc_count(dev) > multicast_filter_limit) || (dev->flags & IFF_ALLMULTI)) { /* Too many to match, or accept all multicasts. 
*/ memset(mc_filter, 0xff, sizeof(mc_filter)); rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; } else if (!netdev_mc_empty(dev)) { struct netdev_hw_addr *ha; int bit; int index; int crc; memset (mc_filter, 0, sizeof (mc_filter)); netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(ETH_ALEN, ha->addr); for (index=0, bit=0; bit < 6; bit++, crc <<= 1) if (crc & 0x80000000) index |= 1 << bit; mc_filter[index/16] |= (1 << (index % 16)); } rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys; } else { iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode); return; } if (np->mii_if.full_duplex && np->flowctrl) mc_filter[3] |= 0x0200; for (i = 0; i < 4; i++) iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2); iowrite8(rx_mode, ioaddr + RxMode); } static int __set_mac_addr(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); u16 addr16; addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8)); iowrite16(addr16, np->base + StationAddr); addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8)); iowrite16(addr16, np->base + StationAddr+2); addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8)); iowrite16(addr16, np->base + StationAddr+4); return 0; } /* Invoked with rtnl_lock held */ static int sundance_set_mac_addr(struct net_device *dev, void *data) { const struct sockaddr *addr = data; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(dev, addr->sa_data); __set_mac_addr(dev); return 0; } static const struct { const char name[ETH_GSTRING_LEN]; } sundance_stats[] = { { "tx_multiple_collisions" }, { "tx_single_collisions" }, { "tx_late_collisions" }, { "tx_deferred" }, { "tx_deferred_excessive" }, { "tx_aborted" }, { "tx_bcasts" }, { "rx_bcasts" }, { "tx_mcasts" }, { "rx_mcasts" }, }; static int check_if_running(struct net_device *dev) { if (!netif_running(dev)) return -EINVAL; return 0; } static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct netdev_private *np = netdev_priv(dev); strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } static int get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); spin_lock_irq(&np->lock); mii_ethtool_get_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return 0; } static int set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); int res; spin_lock_irq(&np->lock); res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return res; } static int nway_reset(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); return mii_nway_restart(&np->mii_if); } static u32 get_link(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); return mii_link_ok(&np->mii_if); } static u32 get_msglevel(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); return np->msg_enable; } static void set_msglevel(struct net_device *dev, u32 val) { struct netdev_private *np = netdev_priv(dev); np->msg_enable = val; } static void get_strings(struct net_device *dev, u32 stringset, u8 *data) { if (stringset == ETH_SS_STATS) memcpy(data, sundance_stats, sizeof(sundance_stats)); } static int get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(sundance_stats); default: return -EOPNOTSUPP; } } static void get_ethtool_stats(struct net_device 
*dev, struct ethtool_stats *stats, u64 *data) { struct netdev_private *np = netdev_priv(dev); int i = 0; get_stats(dev); data[i++] = np->xstats.tx_multiple_collisions; data[i++] = np->xstats.tx_single_collisions; data[i++] = np->xstats.tx_late_collisions; data[i++] = np->xstats.tx_deferred; data[i++] = np->xstats.tx_deferred_excessive; data[i++] = np->xstats.tx_aborted; data[i++] = np->xstats.tx_bcasts; data[i++] = np->xstats.rx_bcasts; data[i++] = np->xstats.tx_mcasts; data[i++] = np->xstats.rx_mcasts; } #ifdef CONFIG_PM static void sundance_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; u8 wol_bits; wol->wolopts = 0; wol->supported = (WAKE_PHY | WAKE_MAGIC); if (!np->wol_enabled) return; wol_bits = ioread8(ioaddr + WakeEvent); if (wol_bits & MagicPktEnable) wol->wolopts |= WAKE_MAGIC; if (wol_bits & LinkEventEnable) wol->wolopts |= WAKE_PHY; } static int sundance_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; u8 wol_bits; if (!device_can_wakeup(&np->pci_dev->dev)) return -EOPNOTSUPP; np->wol_enabled = !!(wol->wolopts); wol_bits = ioread8(ioaddr + WakeEvent); wol_bits &= ~(WakePktEnable | MagicPktEnable | LinkEventEnable | WolEnable); if (np->wol_enabled) { if (wol->wolopts & WAKE_MAGIC) wol_bits |= (MagicPktEnable | WolEnable); if (wol->wolopts & WAKE_PHY) wol_bits |= (LinkEventEnable | WolEnable); } iowrite8(wol_bits, ioaddr + WakeEvent); device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled); return 0; } #else #define sundance_get_wol NULL #define sundance_set_wol NULL #endif /* CONFIG_PM */ static const struct ethtool_ops ethtool_ops = { .begin = check_if_running, .get_drvinfo = get_drvinfo, .nway_reset = nway_reset, .get_link = get_link, .get_wol = sundance_get_wol, .set_wol = sundance_set_wol, .get_msglevel = get_msglevel, .set_msglevel = set_msglevel, .get_strings = get_strings, .get_sset_count = get_sset_count, .get_ethtool_stats = get_ethtool_stats, .get_link_ksettings = get_link_ksettings, .set_link_ksettings = set_link_ksettings, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct netdev_private *np = netdev_priv(dev); int rc; if (!netif_running(dev)) return -EINVAL; spin_lock_irq(&np->lock); rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL); spin_unlock_irq(&np->lock); return rc; } static int netdev_close(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; struct sk_buff *skb; int i; /* Wait and kill tasklet */ tasklet_kill(&np->rx_tasklet); tasklet_kill(&np->tx_tasklet); np->cur_tx = 0; np->dirty_tx = 0; np->cur_task = 0; np->last_tx = NULL; netif_stop_queue(dev); if (netif_msg_ifdown(np)) { printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x " "Rx %4.4x Int %2.2x.\n", dev->name, ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus)); printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); } /* Disable interrupts by clearing the interrupt mask. */ iowrite16(0x0000, ioaddr + IntrEnable); /* Disable Rx and Tx DMA for safely release resource */ iowrite32(0x500, ioaddr + DMACtrl); /* Stop the chip's Tx and Rx processes. 
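The sequence below disables the MAC receiver/transmitter, waits for the DMA engines to go idle, then issues a global reset through the high word of ASICCtrl before the interrupt, timer and ring resources are released.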
*/ iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1); for (i = 2000; i > 0; i--) { if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0) break; mdelay(1); } iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset, ioaddr + ASIC_HI_WORD(ASICCtrl)); for (i = 2000; i > 0; i--) { if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0) break; mdelay(1); } #ifdef __i386__ if (netif_msg_hw(np)) { printk(KERN_DEBUG " Tx ring at %8.8x:\n", (int)(np->tx_ring_dma)); for (i = 0; i < TX_RING_SIZE; i++) printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n", i, np->tx_ring[i].status, np->tx_ring[i].frag.addr, np->tx_ring[i].frag.length); printk(KERN_DEBUG " Rx ring %8.8x:\n", (int)(np->rx_ring_dma)); for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) { printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n", i, np->rx_ring[i].status, np->rx_ring[i].frag.addr, np->rx_ring[i].frag.length); } } #endif /* __i386__ debugging only */ free_irq(np->pci_dev->irq, dev); del_timer_sync(&np->timer); /* Free all the skbuffs in the Rx queue. */ for (i = 0; i < RX_RING_SIZE; i++) { np->rx_ring[i].status = 0; skb = np->rx_skbuff[i]; if (skb) { dma_unmap_single(&np->pci_dev->dev, le32_to_cpu(np->rx_ring[i].frag.addr), np->rx_buf_sz, DMA_FROM_DEVICE); dev_kfree_skb(skb); np->rx_skbuff[i] = NULL; } np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */ } for (i = 0; i < TX_RING_SIZE; i++) { np->tx_ring[i].next_desc = 0; skb = np->tx_skbuff[i]; if (skb) { dma_unmap_single(&np->pci_dev->dev, le32_to_cpu(np->tx_ring[i].frag.addr), skb->len, DMA_TO_DEVICE); dev_kfree_skb(skb); np->tx_skbuff[i] = NULL; } } return 0; } static void sundance_remove1(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); if (dev) { struct netdev_private *np = netdev_priv(dev); unregister_netdev(dev); dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); pci_iounmap(pdev, np->base); pci_release_regions(pdev); free_netdev(dev); } } static int __maybe_unused sundance_suspend(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; if (!netif_running(dev)) return 0; netdev_close(dev); netif_device_detach(dev); if (np->wol_enabled) { iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode); iowrite16(RxEnable, ioaddr + MACCtrl1); } device_set_wakeup_enable(dev_d, np->wol_enabled); return 0; } static int __maybe_unused sundance_resume(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); int err = 0; if (!netif_running(dev)) return 0; err = netdev_open(dev); if (err) { printk(KERN_ERR "%s: Can't resume interface!\n", dev->name); goto out; } netif_device_attach(dev); out: return err; } static SIMPLE_DEV_PM_OPS(sundance_pm_ops, sundance_suspend, sundance_resume); static struct pci_driver sundance_driver = { .name = DRV_NAME, .id_table = sundance_pci_tbl, .probe = sundance_probe1, .remove = sundance_remove1, .driver.pm = &sundance_pm_ops, }; module_pci_driver(sundance_driver);
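/* Editor's note, illustration only (not part of the driver): a minimal
   user-space sketch of how set_rx_mode() above derives its 6-bit multicast
   hash index.  It assumes ether_crc_le() behaves like the kernel's standard
   little-endian CRC-32 (polynomial 0xedb88320, initial value ~0, no final
   inversion), and demo_ether_crc_le() is a local stand-in for it.  The block
   is kept inert under preprocessor exclusion so it is never built as part of
   the module; compile it on its own with a hosted C compiler to try it. */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_ether_crc_le(int len, const uint8_t *p)
{
	uint32_t crc = 0xffffffff;	/* same seed the kernel helper uses */

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	/* 01:00:5e:00:00:01 - the IPv4 all-hosts multicast MAC address */
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc = demo_ether_crc_le(6, mcast);
	int index = 0;

	/* Same bit-reversal loop as set_rx_mode(): the six most significant
	   CRC bits, reversed, select one of the 64 hash-filter positions. */
	for (int bit = 0; bit < 6; bit++, crc <<= 1)
		if (crc & 0x80000000)
			index |= 1 << bit;

	printf("MulticastFilter word %d, bit %d\n", index / 16, index % 16);
	return 0;
}
#endif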
linux-master
drivers/net/ethernet/dlink/sundance.c
// SPDX-License-Identifier: GPL-2.0-or-later /* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */ /* Copyright (c) 2001, 2002 by D-Link Corporation Written by Edward Peng.<[email protected]> Created 03-May-2001, base on Linux' sundance.c. */ #include "dl2k.h" #include <linux/dma-mapping.h> #define dw32(reg, val) iowrite32(val, ioaddr + (reg)) #define dw16(reg, val) iowrite16(val, ioaddr + (reg)) #define dw8(reg, val) iowrite8(val, ioaddr + (reg)) #define dr32(reg) ioread32(ioaddr + (reg)) #define dr16(reg) ioread16(ioaddr + (reg)) #define dr8(reg) ioread8(ioaddr + (reg)) #define MAX_UNITS 8 static int mtu[MAX_UNITS]; static int vlan[MAX_UNITS]; static int jumbo[MAX_UNITS]; static char *media[MAX_UNITS]; static int tx_flow=-1; static int rx_flow=-1; static int copy_thresh; static int rx_coalesce=10; /* Rx frame count each interrupt */ static int rx_timeout=200; /* Rx DMA wait time in 640ns increments */ static int tx_coalesce=16; /* HW xmit count each TxDMAComplete */ MODULE_AUTHOR ("Edward Peng"); MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter"); MODULE_LICENSE("GPL"); module_param_array(mtu, int, NULL, 0); module_param_array(media, charp, NULL, 0); module_param_array(vlan, int, NULL, 0); module_param_array(jumbo, int, NULL, 0); module_param(tx_flow, int, 0); module_param(rx_flow, int, 0); module_param(copy_thresh, int, 0); module_param(rx_coalesce, int, 0); /* Rx frame count each interrupt */ module_param(rx_timeout, int, 0); /* Rx DMA wait time in 64ns increments */ module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */ /* Enable the default interrupts */ #define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \ UpdateStats | LinkEvent) static void dl2k_enable_int(struct netdev_private *np) { void __iomem *ioaddr = np->ioaddr; dw16(IntEnable, DEFAULT_INTR); } static const int max_intrloop = 50; static const int multicast_filter_limit = 0x40; static int rio_open (struct net_device *dev); static void rio_timer (struct timer_list *t); static void rio_tx_timeout (struct net_device *dev, unsigned int txqueue); static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev); static irqreturn_t rio_interrupt (int irq, void *dev_instance); static void rio_free_tx (struct net_device *dev, int irq); static void tx_error (struct net_device *dev, int tx_status); static int receive_packet (struct net_device *dev); static void rio_error (struct net_device *dev, int int_status); static void set_multicast (struct net_device *dev); static struct net_device_stats *get_stats (struct net_device *dev); static int clear_stats (struct net_device *dev); static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); static int rio_close (struct net_device *dev); static int find_miiphy (struct net_device *dev); static int parse_eeprom (struct net_device *dev); static int read_eeprom (struct netdev_private *, int eep_addr); static int mii_wait_link (struct net_device *dev, int wait); static int mii_set_media (struct net_device *dev); static int mii_get_media (struct net_device *dev); static int mii_set_media_pcs (struct net_device *dev); static int mii_get_media_pcs (struct net_device *dev); static int mii_read (struct net_device *dev, int phy_addr, int reg_num); static int mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data); static const struct ethtool_ops ethtool_ops; static const struct net_device_ops netdev_ops = { .ndo_open = rio_open, .ndo_start_xmit = start_xmit, .ndo_stop = rio_close, 
.ndo_get_stats = get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_set_rx_mode = set_multicast, .ndo_eth_ioctl = rio_ioctl, .ndo_tx_timeout = rio_tx_timeout, }; static int rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev; struct netdev_private *np; static int card_idx; int chip_idx = ent->driver_data; int err, irq; void __iomem *ioaddr; void *ring_space; dma_addr_t ring_dma; err = pci_enable_device (pdev); if (err) return err; irq = pdev->irq; err = pci_request_regions (pdev, "dl2k"); if (err) goto err_out_disable; pci_set_master (pdev); err = -ENOMEM; dev = alloc_etherdev (sizeof (*np)); if (!dev) goto err_out_res; SET_NETDEV_DEV(dev, &pdev->dev); np = netdev_priv(dev); /* IO registers range. */ ioaddr = pci_iomap(pdev, 0, 0); if (!ioaddr) goto err_out_dev; np->eeprom_addr = ioaddr; #ifdef MEM_MAPPING /* MM registers range. */ ioaddr = pci_iomap(pdev, 1, 0); if (!ioaddr) goto err_out_iounmap; #endif np->ioaddr = ioaddr; np->chip_id = chip_idx; np->pdev = pdev; spin_lock_init (&np->tx_lock); spin_lock_init (&np->rx_lock); /* Parse manual configuration */ np->an_enable = 1; np->tx_coalesce = 1; if (card_idx < MAX_UNITS) { if (media[card_idx] != NULL) { np->an_enable = 0; if (strcmp (media[card_idx], "auto") == 0 || strcmp (media[card_idx], "autosense") == 0 || strcmp (media[card_idx], "0") == 0 ) { np->an_enable = 2; } else if (strcmp (media[card_idx], "100mbps_fd") == 0 || strcmp (media[card_idx], "4") == 0) { np->speed = 100; np->full_duplex = 1; } else if (strcmp (media[card_idx], "100mbps_hd") == 0 || strcmp (media[card_idx], "3") == 0) { np->speed = 100; np->full_duplex = 0; } else if (strcmp (media[card_idx], "10mbps_fd") == 0 || strcmp (media[card_idx], "2") == 0) { np->speed = 10; np->full_duplex = 1; } else if (strcmp (media[card_idx], "10mbps_hd") == 0 || strcmp (media[card_idx], "1") == 0) { np->speed = 10; np->full_duplex = 0; } else if (strcmp (media[card_idx], "1000mbps_fd") == 0 || strcmp (media[card_idx], "6") == 0) { np->speed=1000; np->full_duplex=1; } else if (strcmp (media[card_idx], "1000mbps_hd") == 0 || strcmp (media[card_idx], "5") == 0) { np->speed = 1000; np->full_duplex = 0; } else { np->an_enable = 1; } } if (jumbo[card_idx] != 0) { np->jumbo = 1; dev->mtu = MAX_JUMBO; } else { np->jumbo = 0; if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE) dev->mtu = mtu[card_idx]; } np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ? vlan[card_idx] : 0; if (rx_coalesce > 0 && rx_timeout > 0) { np->rx_coalesce = rx_coalesce; np->rx_timeout = rx_timeout; np->coalesce = 1; } np->tx_flow = (tx_flow == 0) ? 0 : 1; np->rx_flow = (rx_flow == 0) ? 0 : 1; if (tx_coalesce < 1) tx_coalesce = 1; else if (tx_coalesce > TX_RING_SIZE-1) tx_coalesce = TX_RING_SIZE - 1; } dev->netdev_ops = &netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; dev->ethtool_ops = &ethtool_ops; #if 0 dev->features = NETIF_F_IP_CSUM; #endif /* MTU range: 68 - 1536 or 8000 */ dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = np->jumbo ? 
MAX_JUMBO : PACKET_SIZE; pci_set_drvdata (pdev, dev); ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma, GFP_KERNEL); if (!ring_space) goto err_out_iounmap; np->tx_ring = ring_space; np->tx_ring_dma = ring_dma; ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma, GFP_KERNEL); if (!ring_space) goto err_out_unmap_tx; np->rx_ring = ring_space; np->rx_ring_dma = ring_dma; /* Parse eeprom data */ parse_eeprom (dev); /* Find PHY address */ err = find_miiphy (dev); if (err) goto err_out_unmap_rx; /* Fiber device? */ np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0; np->link_status = 0; /* Set media and reset PHY */ if (np->phy_media) { /* default Auto-Negotiation for fiber deivices */ if (np->an_enable == 2) { np->an_enable = 1; } } else { /* Auto-Negotiation is mandatory for 1000BASE-T, IEEE 802.3ab Annex 28D page 14 */ if (np->speed == 1000) np->an_enable = 1; } err = register_netdev (dev); if (err) goto err_out_unmap_rx; card_idx++; printk (KERN_INFO "%s: %s, %pM, IRQ %d\n", dev->name, np->name, dev->dev_addr, irq); if (tx_coalesce > 1) printk(KERN_INFO "tx_coalesce:\t%d packets\n", tx_coalesce); if (np->coalesce) printk(KERN_INFO "rx_coalesce:\t%d packets\n" "rx_timeout: \t%d ns\n", np->rx_coalesce, np->rx_timeout*640); if (np->vlan) printk(KERN_INFO "vlan(id):\t%d\n", np->vlan); return 0; err_out_unmap_rx: dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); err_out_unmap_tx: dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); err_out_iounmap: #ifdef MEM_MAPPING pci_iounmap(pdev, np->ioaddr); #endif pci_iounmap(pdev, np->eeprom_addr); err_out_dev: free_netdev (dev); err_out_res: pci_release_regions (pdev); err_out_disable: pci_disable_device (pdev); return err; } static int find_miiphy (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); int i, phy_found = 0; np->phy_addr = 1; for (i = 31; i >= 0; i--) { int mii_status = mii_read (dev, i, 1); if (mii_status != 0xffff && mii_status != 0x0000) { np->phy_addr = i; phy_found++; } } if (!phy_found) { printk (KERN_ERR "%s: No MII PHY found!\n", dev->name); return -ENODEV; } return 0; } static int parse_eeprom (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; int i, j; u8 sromdata[256]; u8 *psib; u32 crc; PSROM_t psrom = (PSROM_t) sromdata; int cid, next; for (i = 0; i < 128; i++) ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i)); if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */ /* Check CRC */ crc = ~ether_crc_le (256 - 4, sromdata); if (psrom->crc != cpu_to_le32(crc)) { printk (KERN_ERR "%s: EEPROM data CRC error.\n", dev->name); return -1; } } /* Set MAC address */ eth_hw_addr_set(dev, psrom->mac_addr); if (np->chip_id == CHIP_IP1000A) { np->led_mode = psrom->led_mode; return 0; } if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) { return 0; } /* Parse Software Information Block */ i = 0x30; psib = (u8 *) sromdata; do { cid = psib[i++]; next = psib[i++]; if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) { printk (KERN_ERR "Cell data error\n"); return -1; } switch (cid) { case 0: /* Format version */ break; case 1: /* End of cell */ return 0; case 2: /* Duplex Polarity */ np->duplex_polarity = psib[i]; dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]); break; case 3: /* Wake Polarity */ np->wake_polarity = psib[i]; break; case 9: /* Adapter description */ j = (next - i > 255) ? 
255 : next - i; memcpy (np->name, &(psib[i]), j); break; case 4: case 5: case 6: case 7: case 8: /* Reversed */ break; default: /* Unknown cell */ return -1; } i = next; } while (1); return 0; } static void rio_set_led_mode(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; u32 mode; if (np->chip_id != CHIP_IP1000A) return; mode = dr32(ASICCtrl); mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED); if (np->led_mode & 0x01) mode |= IPG_AC_LED_MODE; if (np->led_mode & 0x02) mode |= IPG_AC_LED_MODE_BIT_1; if (np->led_mode & 0x08) mode |= IPG_AC_LED_SPEED; dw32(ASICCtrl, mode); } static inline dma_addr_t desc_to_dma(struct netdev_desc *desc) { return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48); } static void free_list(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); struct sk_buff *skb; int i; /* Free all the skbuffs in the queue. */ for (i = 0; i < RX_RING_SIZE; i++) { skb = np->rx_skbuff[i]; if (skb) { dma_unmap_single(&np->pdev->dev, desc_to_dma(&np->rx_ring[i]), skb->len, DMA_FROM_DEVICE); dev_kfree_skb(skb); np->rx_skbuff[i] = NULL; } np->rx_ring[i].status = 0; np->rx_ring[i].fraginfo = 0; } for (i = 0; i < TX_RING_SIZE; i++) { skb = np->tx_skbuff[i]; if (skb) { dma_unmap_single(&np->pdev->dev, desc_to_dma(&np->tx_ring[i]), skb->len, DMA_TO_DEVICE); dev_kfree_skb(skb); np->tx_skbuff[i] = NULL; } } } static void rio_reset_ring(struct netdev_private *np) { int i; np->cur_rx = 0; np->cur_tx = 0; np->old_rx = 0; np->old_tx = 0; for (i = 0; i < TX_RING_SIZE; i++) np->tx_ring[i].status = cpu_to_le64(TFDDone); for (i = 0; i < RX_RING_SIZE; i++) np->rx_ring[i].status = 0; } /* allocate and initialize Tx and Rx descriptors */ static int alloc_list(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); int i; rio_reset_ring(np); np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32); /* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */ for (i = 0; i < TX_RING_SIZE; i++) { np->tx_skbuff[i] = NULL; np->tx_ring[i].next_desc = cpu_to_le64(np->tx_ring_dma + ((i + 1) % TX_RING_SIZE) * sizeof(struct netdev_desc)); } /* Initialize Rx descriptors & allocate buffers */ for (i = 0; i < RX_RING_SIZE; i++) { /* Allocated fixed size of skbuff */ struct sk_buff *skb; skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); np->rx_skbuff[i] = skb; if (!skb) { free_list(dev); return -ENOMEM; } np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma + ((i + 1) % RX_RING_SIZE) * sizeof(struct netdev_desc)); /* Rubicon now supports 40 bits of addressing space. 
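fraginfo packs the DMA address into its low 48 bits and the fragment length into the top 16 bits; desc_to_dma() above masks with DMA_BIT_MASK(48) to recover the address.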
*/ np->rx_ring[i].fraginfo = cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data, np->rx_buf_sz, DMA_FROM_DEVICE)); np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48); } return 0; } static void rio_hw_init(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; int i; u16 macctrl; /* Reset all logic functions */ dw16(ASICCtrl + 2, GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset); mdelay(10); rio_set_led_mode(dev); /* DebugCtrl bit 4, 5, 9 must set */ dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230); if (np->chip_id == CHIP_IP1000A && (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) { /* PHY magic taken from ipg driver, undocumented registers */ mii_write(dev, np->phy_addr, 31, 0x0001); mii_write(dev, np->phy_addr, 27, 0x01e0); mii_write(dev, np->phy_addr, 31, 0x0002); mii_write(dev, np->phy_addr, 27, 0xeb8e); mii_write(dev, np->phy_addr, 31, 0x0000); mii_write(dev, np->phy_addr, 30, 0x005e); /* advertise 1000BASE-T half & full duplex, prefer MASTER */ mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700); } if (np->phy_media) mii_set_media_pcs(dev); else mii_set_media(dev); /* Jumbo frame */ if (np->jumbo != 0) dw16(MaxFrameSize, MAX_JUMBO+14); /* Set RFDListPtr */ dw32(RFDListPtr0, np->rx_ring_dma); dw32(RFDListPtr1, 0); /* Set station address */ /* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works * too. However, it doesn't work on IP1000A so we use 16-bit access. */ for (i = 0; i < 3; i++) dw16(StationAddr0 + 2 * i, cpu_to_le16(((const u16 *)dev->dev_addr)[i])); set_multicast (dev); if (np->coalesce) { dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16); } /* Set RIO to poll every N*320nsec. */ dw8(RxDMAPollPeriod, 0x20); dw8(TxDMAPollPeriod, 0xff); dw8(RxDMABurstThresh, 0x30); dw8(RxDMAUrgentThresh, 0x30); dw32(RmonStatMask, 0x0007ffff); /* clear statistics */ clear_stats (dev); /* VLAN supported */ if (np->vlan) { /* priority field in RxDMAIntCtrl */ dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10); /* VLANId */ dw16(VLANId, np->vlan); /* Length/Type should be 0x8100 */ dw32(VLANTag, 0x8100 << 16 | np->vlan); /* Enable AutoVLANuntagging, but disable AutoVLANtagging. VLAN information tagged by TFC' VID, CFI fields. */ dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging); } /* Start Tx/Rx */ dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable); macctrl = 0; macctrl |= (np->vlan) ? AutoVLANuntagging : 0; macctrl |= (np->full_duplex) ? DuplexSelect : 0; macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0; macctrl |= (np->rx_flow) ? 
RxFlowControlEnable : 0; dw16(MACCtrl, macctrl); } static void rio_hw_stop(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; /* Disable interrupts */ dw16(IntEnable, 0); /* Stop Tx and Rx logics */ dw32(MACCtrl, TxDisable | RxDisable | StatsDisable); } static int rio_open(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); const int irq = np->pdev->irq; int i; i = alloc_list(dev); if (i) return i; rio_hw_init(dev); i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev); if (i) { rio_hw_stop(dev); free_list(dev); return i; } timer_setup(&np->timer, rio_timer, 0); np->timer.expires = jiffies + 1 * HZ; add_timer(&np->timer); netif_start_queue (dev); dl2k_enable_int(np); return 0; } static void rio_timer (struct timer_list *t) { struct netdev_private *np = from_timer(np, t, timer); struct net_device *dev = pci_get_drvdata(np->pdev); unsigned int entry; int next_tick = 1*HZ; unsigned long flags; spin_lock_irqsave(&np->rx_lock, flags); /* Recover rx ring exhausted error */ if (np->cur_rx - np->old_rx >= RX_RING_SIZE) { printk(KERN_INFO "Try to recover rx ring exhausted...\n"); /* Re-allocate skbuffs to fill the descriptor ring */ for (; np->cur_rx - np->old_rx > 0; np->old_rx++) { struct sk_buff *skb; entry = np->old_rx % RX_RING_SIZE; /* Dropped packets don't need to re-allocate */ if (np->rx_skbuff[entry] == NULL) { skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); if (skb == NULL) { np->rx_ring[entry].fraginfo = 0; printk (KERN_INFO "%s: Still unable to re-allocate Rx skbuff.#%d\n", dev->name, entry); break; } np->rx_skbuff[entry] = skb; np->rx_ring[entry].fraginfo = cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data, np->rx_buf_sz, DMA_FROM_DEVICE)); } np->rx_ring[entry].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48); np->rx_ring[entry].status = 0; } /* end for */ } /* end if */ spin_unlock_irqrestore (&np->rx_lock, flags); np->timer.expires = jiffies + next_tick; add_timer(&np->timer); } static void rio_tx_timeout (struct net_device *dev, unsigned int txqueue) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n", dev->name, dr32(TxStatus)); rio_free_tx(dev, 0); dev->if_port = 0; netif_trans_update(dev); /* prevent tx timeout */ } static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; struct netdev_desc *txdesc; unsigned entry; u64 tfc_vlan_tag = 0; if (np->link_status == 0) { /* Link Down */ dev_kfree_skb(skb); return NETDEV_TX_OK; } entry = np->cur_tx % TX_RING_SIZE; np->tx_skbuff[entry] = skb; txdesc = &np->tx_ring[entry]; #if 0 if (skb->ip_summed == CHECKSUM_PARTIAL) { txdesc->status |= cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable | IPChecksumEnable); } #endif if (np->vlan) { tfc_vlan_tag = VLANTagInsert | ((u64)np->vlan << 32) | ((u64)skb->priority << 45); } txdesc->fraginfo = cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data, skb->len, DMA_TO_DEVICE)); txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48); /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode * Work around: Always use 1 descriptor in 10Mbps mode */ if (entry % np->tx_coalesce == 0 || np->speed == 10) txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag | WordAlignDisable | TxDMAIndicate | (1 << FragCountShift)); else txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag | WordAlignDisable | (1 << FragCountShift)); 
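/* A Tx-complete indication (TxDMAIndicate) is only requested on every
 * tx_coalesce-th descriptor, and on every descriptor in 10 Mbps mode; the
 * CountDown write below ("Schedule ISR") still arms an interrupt so that
 * completed descriptors get reclaimed even when no indication was requested.
 */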
/* TxDMAPollNow */ dw32(DMACtrl, dr32(DMACtrl) | 0x00001000); /* Schedule ISR */ dw32(CountDown, 10000); np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE; if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE < TX_QUEUE_LEN - 1 && np->speed != 10) { /* do nothing */ } else if (!netif_queue_stopped(dev)) { netif_stop_queue (dev); } /* The first TFDListPtr */ if (!dr32(TFDListPtr0)) { dw32(TFDListPtr0, np->tx_ring_dma + entry * sizeof (struct netdev_desc)); dw32(TFDListPtr1, 0); } return NETDEV_TX_OK; } static irqreturn_t rio_interrupt (int irq, void *dev_instance) { struct net_device *dev = dev_instance; struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; unsigned int_status; int cnt = max_intrloop; int handled = 0; while (1) { int_status = dr16(IntStatus); dw16(IntStatus, int_status); int_status &= DEFAULT_INTR; if (int_status == 0 || --cnt < 0) break; handled = 1; /* Processing received packets */ if (int_status & RxDMAComplete) receive_packet (dev); /* TxDMAComplete interrupt */ if ((int_status & (TxDMAComplete|IntRequested))) { int tx_status; tx_status = dr32(TxStatus); if (tx_status & 0x01) tx_error (dev, tx_status); /* Free used tx skbuffs */ rio_free_tx (dev, 1); } /* Handle uncommon events */ if (int_status & (HostError | LinkEvent | UpdateStats)) rio_error (dev, int_status); } if (np->cur_tx != np->old_tx) dw32(CountDown, 100); return IRQ_RETVAL(handled); } static void rio_free_tx (struct net_device *dev, int irq) { struct netdev_private *np = netdev_priv(dev); int entry = np->old_tx % TX_RING_SIZE; unsigned long flag = 0; if (irq) spin_lock(&np->tx_lock); else spin_lock_irqsave(&np->tx_lock, flag); /* Free used tx skbuffs */ while (entry != np->cur_tx) { struct sk_buff *skb; if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone))) break; skb = np->tx_skbuff[entry]; dma_unmap_single(&np->pdev->dev, desc_to_dma(&np->tx_ring[entry]), skb->len, DMA_TO_DEVICE); if (irq) dev_consume_skb_irq(skb); else dev_kfree_skb(skb); np->tx_skbuff[entry] = NULL; entry = (entry + 1) % TX_RING_SIZE; } if (irq) spin_unlock(&np->tx_lock); else spin_unlock_irqrestore(&np->tx_lock, flag); np->old_tx = entry; /* If the ring is no longer full, clear tx_full and call netif_wake_queue() */ if (netif_queue_stopped(dev) && ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE < TX_QUEUE_LEN - 1 || np->speed == 10)) { netif_wake_queue (dev); } } static void tx_error (struct net_device *dev, int tx_status) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; int frame_id; int i; frame_id = (tx_status & 0xffff0000); printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n", dev->name, tx_status, frame_id); dev->stats.tx_errors++; /* Ttransmit Underrun */ if (tx_status & 0x10) { dev->stats.tx_fifo_errors++; dw16(TxStartThresh, dr16(TxStartThresh) + 0x10); /* Transmit Underrun need to set TxReset, DMARest, FIFOReset */ dw16(ASICCtrl + 2, TxReset | DMAReset | FIFOReset | NetworkReset); /* Wait for ResetBusy bit clear */ for (i = 50; i > 0; i--) { if (!(dr16(ASICCtrl + 2) & ResetBusy)) break; mdelay (1); } rio_set_led_mode(dev); rio_free_tx (dev, 1); /* Reset TFDListPtr */ dw32(TFDListPtr0, np->tx_ring_dma + np->old_tx * sizeof (struct netdev_desc)); dw32(TFDListPtr1, 0); /* Let TxStartThresh stay default value */ } /* Late Collision */ if (tx_status & 0x04) { dev->stats.tx_fifo_errors++; /* TxReset and clear FIFO */ dw16(ASICCtrl + 2, TxReset | FIFOReset); /* Wait reset done */ for (i = 50; i > 0; i--) { if (!(dr16(ASICCtrl + 2) & ResetBusy)) 
break; mdelay (1); } rio_set_led_mode(dev); /* Let TxStartThresh stay default value */ } /* Maximum Collisions */ if (tx_status & 0x08) dev->stats.collisions++; /* Restart the Tx */ dw32(MACCtrl, dr16(MACCtrl) | TxEnable); } static int receive_packet (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); int entry = np->cur_rx % RX_RING_SIZE; int cnt = 30; /* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */ while (1) { struct netdev_desc *desc = &np->rx_ring[entry]; int pkt_len; u64 frame_status; if (!(desc->status & cpu_to_le64(RFDDone)) || !(desc->status & cpu_to_le64(FrameStart)) || !(desc->status & cpu_to_le64(FrameEnd))) break; /* Chip omits the CRC. */ frame_status = le64_to_cpu(desc->status); pkt_len = frame_status & 0xffff; if (--cnt < 0) break; /* Update rx error statistics, drop packet. */ if (frame_status & RFS_Errors) { dev->stats.rx_errors++; if (frame_status & (RxRuntFrame | RxLengthError)) dev->stats.rx_length_errors++; if (frame_status & RxFCSError) dev->stats.rx_crc_errors++; if (frame_status & RxAlignmentError && np->speed != 1000) dev->stats.rx_frame_errors++; if (frame_status & RxFIFOOverrun) dev->stats.rx_fifo_errors++; } else { struct sk_buff *skb; /* Small skbuffs for short packets */ if (pkt_len > copy_thresh) { dma_unmap_single(&np->pdev->dev, desc_to_dma(desc), np->rx_buf_sz, DMA_FROM_DEVICE); skb_put (skb = np->rx_skbuff[entry], pkt_len); np->rx_skbuff[entry] = NULL; } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) { dma_sync_single_for_cpu(&np->pdev->dev, desc_to_dma(desc), np->rx_buf_sz, DMA_FROM_DEVICE); skb_copy_to_linear_data (skb, np->rx_skbuff[entry]->data, pkt_len); skb_put (skb, pkt_len); dma_sync_single_for_device(&np->pdev->dev, desc_to_dma(desc), np->rx_buf_sz, DMA_FROM_DEVICE); } skb->protocol = eth_type_trans (skb, dev); #if 0 /* Checksum done by hw, but csum value unavailable. */ if (np->pdev->pci_rev_id >= 0x0c && !(frame_status & (TCPError | UDPError | IPError))) { skb->ip_summed = CHECKSUM_UNNECESSARY; } #endif netif_rx (skb); } entry = (entry + 1) % RX_RING_SIZE; } spin_lock(&np->rx_lock); np->cur_rx = entry; /* Re-allocate skbuffs to fill the descriptor ring */ entry = np->old_rx; while (entry != np->cur_rx) { struct sk_buff *skb; /* Dropped packets don't need to re-allocate */ if (np->rx_skbuff[entry] == NULL) { skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); if (skb == NULL) { np->rx_ring[entry].fraginfo = 0; printk (KERN_INFO "%s: receive_packet: " "Unable to re-allocate Rx skbuff.#%d\n", dev->name, entry); break; } np->rx_skbuff[entry] = skb; np->rx_ring[entry].fraginfo = cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data, np->rx_buf_sz, DMA_FROM_DEVICE)); } np->rx_ring[entry].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48); np->rx_ring[entry].status = 0; entry = (entry + 1) % RX_RING_SIZE; } np->old_rx = entry; spin_unlock(&np->rx_lock); return 0; } static void rio_error (struct net_device *dev, int int_status) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; u16 macctrl; /* Link change event */ if (int_status & LinkEvent) { if (mii_wait_link (dev, 10) == 0) { printk (KERN_INFO "%s: Link up\n", dev->name); if (np->phy_media) mii_get_media_pcs (dev); else mii_get_media (dev); if (np->speed == 1000) np->tx_coalesce = tx_coalesce; else np->tx_coalesce = 1; macctrl = 0; macctrl |= (np->vlan) ? AutoVLANuntagging : 0; macctrl |= (np->full_duplex) ? DuplexSelect : 0; macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0; macctrl |= (np->rx_flow) ? 
RxFlowControlEnable : 0; dw16(MACCtrl, macctrl); np->link_status = 1; netif_carrier_on(dev); } else { printk (KERN_INFO "%s: Link off\n", dev->name); np->link_status = 0; netif_carrier_off(dev); } } /* UpdateStats statistics registers */ if (int_status & UpdateStats) { get_stats (dev); } /* PCI Error, a catastronphic error related to the bus interface occurs, set GlobalReset and HostReset to reset. */ if (int_status & HostError) { printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n", dev->name, int_status); dw16(ASICCtrl + 2, GlobalReset | HostReset); mdelay (500); rio_set_led_mode(dev); } } static struct net_device_stats * get_stats (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; #ifdef MEM_MAPPING int i; #endif unsigned int stat_reg; /* All statistics registers need to be acknowledged, else statistic overflow could cause problems */ dev->stats.rx_packets += dr32(FramesRcvOk); dev->stats.tx_packets += dr32(FramesXmtOk); dev->stats.rx_bytes += dr32(OctetRcvOk); dev->stats.tx_bytes += dr32(OctetXmtOk); dev->stats.multicast = dr32(McstFramesRcvdOk); dev->stats.collisions += dr32(SingleColFrames) + dr32(MultiColFrames); /* detailed tx errors */ stat_reg = dr16(FramesAbortXSColls); dev->stats.tx_aborted_errors += stat_reg; dev->stats.tx_errors += stat_reg; stat_reg = dr16(CarrierSenseErrors); dev->stats.tx_carrier_errors += stat_reg; dev->stats.tx_errors += stat_reg; /* Clear all other statistic register. */ dr32(McstOctetXmtOk); dr16(BcstFramesXmtdOk); dr32(McstFramesXmtdOk); dr16(BcstFramesRcvdOk); dr16(MacControlFramesRcvd); dr16(FrameTooLongErrors); dr16(InRangeLengthErrors); dr16(FramesCheckSeqErrors); dr16(FramesLostRxErrors); dr32(McstOctetXmtOk); dr32(BcstOctetXmtOk); dr32(McstFramesXmtdOk); dr32(FramesWDeferredXmt); dr32(LateCollisions); dr16(BcstFramesXmtdOk); dr16(MacControlFramesXmtd); dr16(FramesWEXDeferal); #ifdef MEM_MAPPING for (i = 0x100; i <= 0x150; i += 4) dr32(i); #endif dr16(TxJumboFrames); dr16(RxJumboFrames); dr16(TCPCheckSumErrors); dr16(UDPCheckSumErrors); dr16(IPCheckSumErrors); return &dev->stats; } static int clear_stats (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; #ifdef MEM_MAPPING int i; #endif /* All statistics registers need to be acknowledged, else statistic overflow could cause problems */ dr32(FramesRcvOk); dr32(FramesXmtOk); dr32(OctetRcvOk); dr32(OctetXmtOk); dr32(McstFramesRcvdOk); dr32(SingleColFrames); dr32(MultiColFrames); dr32(LateCollisions); /* detailed rx errors */ dr16(FrameTooLongErrors); dr16(InRangeLengthErrors); dr16(FramesCheckSeqErrors); dr16(FramesLostRxErrors); /* detailed tx errors */ dr16(FramesAbortXSColls); dr16(CarrierSenseErrors); /* Clear all other statistic register. */ dr32(McstOctetXmtOk); dr16(BcstFramesXmtdOk); dr32(McstFramesXmtdOk); dr16(BcstFramesRcvdOk); dr16(MacControlFramesRcvd); dr32(McstOctetXmtOk); dr32(BcstOctetXmtOk); dr32(McstFramesXmtdOk); dr32(FramesWDeferredXmt); dr16(BcstFramesXmtdOk); dr16(MacControlFramesXmtd); dr16(FramesWEXDeferal); #ifdef MEM_MAPPING for (i = 0x100; i <= 0x150; i += 4) dr32(i); #endif dr16(TxJumboFrames); dr16(RxJumboFrames); dr16(TCPCheckSumErrors); dr16(UDPCheckSumErrors); dr16(IPCheckSumErrors); return 0; } static void set_multicast (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; u32 hash_table[2]; u16 rx_mode = 0; hash_table[0] = hash_table[1] = 0; /* RxFlowcontrol DA: 01-80-C2-00-00-01. 
Hash index=0x39 */ hash_table[1] |= 0x02000000; if (dev->flags & IFF_PROMISC) { /* Receive all frames promiscuously. */ rx_mode = ReceiveAllFrames; } else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > multicast_filter_limit)) { /* Receive broadcast and multicast frames */ rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast; } else if (!netdev_mc_empty(dev)) { struct netdev_hw_addr *ha; /* Receive broadcast frames and multicast frames filtering by Hashtable */ rx_mode = ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast; netdev_for_each_mc_addr(ha, dev) { int bit, index = 0; int crc = ether_crc_le(ETH_ALEN, ha->addr); /* The inverted high significant 6 bits of CRC are used as an index to hashtable */ for (bit = 0; bit < 6; bit++) if (crc & (1 << (31 - bit))) index |= (1 << bit); hash_table[index / 32] |= (1 << (index % 32)); } } else { rx_mode = ReceiveBroadcast | ReceiveUnicast; } if (np->vlan) { /* ReceiveVLANMatch field in ReceiveMode */ rx_mode |= ReceiveVLANMatch; } dw32(HashTable0, hash_table[0]); dw32(HashTable1, hash_table[1]); dw16(ReceiveMode, rx_mode); } static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct netdev_private *np = netdev_priv(dev); strscpy(info->driver, "dl2k", sizeof(info->driver)); strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); } static int rio_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); u32 supported, advertising; if (np->phy_media) { /* fiber device */ supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE; advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE; cmd->base.port = PORT_FIBRE; } else { /* copper device */ supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII; advertising = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_MII; cmd->base.port = PORT_MII; } if (np->link_status) { cmd->base.speed = np->speed; cmd->base.duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; } else { cmd->base.speed = SPEED_UNKNOWN; cmd->base.duplex = DUPLEX_UNKNOWN; } if (np->an_enable) cmd->base.autoneg = AUTONEG_ENABLE; else cmd->base.autoneg = AUTONEG_DISABLE; cmd->base.phy_address = np->phy_addr; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); return 0; } static int rio_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct netdev_private *np = netdev_priv(dev); u32 speed = cmd->base.speed; u8 duplex = cmd->base.duplex; netif_carrier_off(dev); if (cmd->base.autoneg == AUTONEG_ENABLE) { if (np->an_enable) { return 0; } else { np->an_enable = 1; mii_set_media(dev); return 0; } } else { np->an_enable = 0; if (np->speed == 1000) { speed = SPEED_100; duplex = DUPLEX_FULL; printk("Warning!! 
Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n"); } switch (speed) { case SPEED_10: np->speed = 10; np->full_duplex = (duplex == DUPLEX_FULL); break; case SPEED_100: np->speed = 100; np->full_duplex = (duplex == DUPLEX_FULL); break; case SPEED_1000: /* not supported */ default: return -EINVAL; } mii_set_media(dev); } return 0; } static u32 rio_get_link(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); return np->link_status; } static const struct ethtool_ops ethtool_ops = { .get_drvinfo = rio_get_drvinfo, .get_link = rio_get_link, .get_link_ksettings = rio_get_link_ksettings, .set_link_ksettings = rio_set_link_ksettings, }; static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) { int phy_addr; struct netdev_private *np = netdev_priv(dev); struct mii_ioctl_data *miidata = if_mii(rq); phy_addr = np->phy_addr; switch (cmd) { case SIOCGMIIPHY: miidata->phy_id = phy_addr; break; case SIOCGMIIREG: miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num); break; case SIOCSMIIREG: if (!capable(CAP_NET_ADMIN)) return -EPERM; mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in); break; default: return -EOPNOTSUPP; } return 0; } #define EEP_READ 0x0200 #define EEP_BUSY 0x8000 /* Read the EEPROM word */ /* We use I/O instruction to read/write eeprom to avoid fail on some machines */ static int read_eeprom(struct netdev_private *np, int eep_addr) { void __iomem *ioaddr = np->eeprom_addr; int i = 1000; dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff)); while (i-- > 0) { if (!(dr16(EepromCtrl) & EEP_BUSY)) return dr16(EepromData); } return 0; } enum phy_ctrl_bits { MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04, MII_DUPLEX = 0x08, }; #define mii_delay() dr8(PhyCtrl) static void mii_sendbit (struct net_device *dev, u32 data) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; data = ((data) ? 
MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE; dw8(PhyCtrl, data); mii_delay (); dw8(PhyCtrl, data | MII_CLK); mii_delay (); } static int mii_getbit (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->ioaddr; u8 data; data = (dr8(PhyCtrl) & 0xf8) | MII_READ; dw8(PhyCtrl, data); mii_delay (); dw8(PhyCtrl, data | MII_CLK); mii_delay (); return (dr8(PhyCtrl) >> 1) & 1; } static void mii_send_bits (struct net_device *dev, u32 data, int len) { int i; for (i = len - 1; i >= 0; i--) { mii_sendbit (dev, data & (1 << i)); } } static int mii_read (struct net_device *dev, int phy_addr, int reg_num) { u32 cmd; int i; u32 retval = 0; /* Preamble */ mii_send_bits (dev, 0xffffffff, 32); /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ /* ST,OP = 0110'b for read operation */ cmd = (0x06 << 10 | phy_addr << 5 | reg_num); mii_send_bits (dev, cmd, 14); /* Turnaround */ if (mii_getbit (dev)) goto err_out; /* Read data */ for (i = 0; i < 16; i++) { retval |= mii_getbit (dev); retval <<= 1; } /* End cycle */ mii_getbit (dev); return (retval >> 1) & 0xffff; err_out: return 0; } static int mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data) { u32 cmd; /* Preamble */ mii_send_bits (dev, 0xffffffff, 32); /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */ cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data; mii_send_bits (dev, cmd, 32); /* End cycle */ mii_getbit (dev); return 0; } static int mii_wait_link (struct net_device *dev, int wait) { __u16 bmsr; int phy_addr; struct netdev_private *np; np = netdev_priv(dev); phy_addr = np->phy_addr; do { bmsr = mii_read (dev, phy_addr, MII_BMSR); if (bmsr & BMSR_LSTATUS) return 0; mdelay (1); } while (--wait > 0); return -1; } static int mii_get_media (struct net_device *dev) { __u16 negotiate; __u16 bmsr; __u16 mscr; __u16 mssr; int phy_addr; struct netdev_private *np; np = netdev_priv(dev); phy_addr = np->phy_addr; bmsr = mii_read (dev, phy_addr, MII_BMSR); if (np->an_enable) { if (!(bmsr & BMSR_ANEGCOMPLETE)) { /* Auto-Negotiation not completed */ return -1; } negotiate = mii_read (dev, phy_addr, MII_ADVERTISE) & mii_read (dev, phy_addr, MII_LPA); mscr = mii_read (dev, phy_addr, MII_CTRL1000); mssr = mii_read (dev, phy_addr, MII_STAT1000); if (mscr & ADVERTISE_1000FULL && mssr & LPA_1000FULL) { np->speed = 1000; np->full_duplex = 1; printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n"); } else if (mscr & ADVERTISE_1000HALF && mssr & LPA_1000HALF) { np->speed = 1000; np->full_duplex = 0; printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n"); } else if (negotiate & ADVERTISE_100FULL) { np->speed = 100; np->full_duplex = 1; printk (KERN_INFO "Auto 100 Mbps, Full duplex\n"); } else if (negotiate & ADVERTISE_100HALF) { np->speed = 100; np->full_duplex = 0; printk (KERN_INFO "Auto 100 Mbps, Half duplex\n"); } else if (negotiate & ADVERTISE_10FULL) { np->speed = 10; np->full_duplex = 1; printk (KERN_INFO "Auto 10 Mbps, Full duplex\n"); } else if (negotiate & ADVERTISE_10HALF) { np->speed = 10; np->full_duplex = 0; printk (KERN_INFO "Auto 10 Mbps, Half duplex\n"); } if (negotiate & ADVERTISE_PAUSE_CAP) { np->tx_flow &= 1; np->rx_flow &= 1; } else if (negotiate & ADVERTISE_PAUSE_ASYM) { np->tx_flow = 0; np->rx_flow &= 1; } /* else tx_flow, rx_flow = user select */ } else { __u16 bmcr = mii_read (dev, phy_addr, MII_BMCR); switch (bmcr & (BMCR_SPEED100 | BMCR_SPEED1000)) { case BMCR_SPEED1000: printk 
(KERN_INFO "Operating at 1000 Mbps, "); break; case BMCR_SPEED100: printk (KERN_INFO "Operating at 100 Mbps, "); break; case 0: printk (KERN_INFO "Operating at 10 Mbps, "); } if (bmcr & BMCR_FULLDPLX) { printk (KERN_CONT "Full duplex\n"); } else { printk (KERN_CONT "Half duplex\n"); } } if (np->tx_flow) printk(KERN_INFO "Enable Tx Flow Control\n"); else printk(KERN_INFO "Disable Tx Flow Control\n"); if (np->rx_flow) printk(KERN_INFO "Enable Rx Flow Control\n"); else printk(KERN_INFO "Disable Rx Flow Control\n"); return 0; } static int mii_set_media (struct net_device *dev) { __u16 pscr; __u16 bmcr; __u16 bmsr; __u16 anar; int phy_addr; struct netdev_private *np; np = netdev_priv(dev); phy_addr = np->phy_addr; /* Does user set speed? */ if (np->an_enable) { /* Advertise capabilities */ bmsr = mii_read (dev, phy_addr, MII_BMSR); anar = mii_read (dev, phy_addr, MII_ADVERTISE) & ~(ADVERTISE_100FULL | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_10HALF | ADVERTISE_100BASE4); if (bmsr & BMSR_100FULL) anar |= ADVERTISE_100FULL; if (bmsr & BMSR_100HALF) anar |= ADVERTISE_100HALF; if (bmsr & BMSR_100BASE4) anar |= ADVERTISE_100BASE4; if (bmsr & BMSR_10FULL) anar |= ADVERTISE_10FULL; if (bmsr & BMSR_10HALF) anar |= ADVERTISE_10HALF; anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; mii_write (dev, phy_addr, MII_ADVERTISE, anar); /* Enable Auto crossover */ pscr = mii_read (dev, phy_addr, MII_PHY_SCR); pscr |= 3 << 5; /* 11'b */ mii_write (dev, phy_addr, MII_PHY_SCR, pscr); /* Soft reset PHY */ mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET); bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET; mii_write (dev, phy_addr, MII_BMCR, bmcr); mdelay(1); } else { /* Force speed setting */ /* 1) Disable Auto crossover */ pscr = mii_read (dev, phy_addr, MII_PHY_SCR); pscr &= ~(3 << 5); mii_write (dev, phy_addr, MII_PHY_SCR, pscr); /* 2) PHY Reset */ bmcr = mii_read (dev, phy_addr, MII_BMCR); bmcr |= BMCR_RESET; mii_write (dev, phy_addr, MII_BMCR, bmcr); /* 3) Power Down */ bmcr = 0x1940; /* must be 0x1940 */ mii_write (dev, phy_addr, MII_BMCR, bmcr); mdelay (100); /* wait a certain time */ /* 4) Advertise nothing */ mii_write (dev, phy_addr, MII_ADVERTISE, 0); /* 5) Set media and Power Up */ bmcr = BMCR_PDOWN; if (np->speed == 100) { bmcr |= BMCR_SPEED100; printk (KERN_INFO "Manual 100 Mbps, "); } else if (np->speed == 10) { printk (KERN_INFO "Manual 10 Mbps, "); } if (np->full_duplex) { bmcr |= BMCR_FULLDPLX; printk (KERN_CONT "Full duplex\n"); } else { printk (KERN_CONT "Half duplex\n"); } #if 0 /* Set 1000BaseT Master/Slave setting */ mscr = mii_read (dev, phy_addr, MII_CTRL1000); mscr |= MII_MSCR_CFG_ENABLE; mscr &= ~MII_MSCR_CFG_VALUE = 0; #endif mii_write (dev, phy_addr, MII_BMCR, bmcr); mdelay(10); } return 0; } static int mii_get_media_pcs (struct net_device *dev) { __u16 negotiate; __u16 bmsr; int phy_addr; struct netdev_private *np; np = netdev_priv(dev); phy_addr = np->phy_addr; bmsr = mii_read (dev, phy_addr, PCS_BMSR); if (np->an_enable) { if (!(bmsr & BMSR_ANEGCOMPLETE)) { /* Auto-Negotiation not completed */ return -1; } negotiate = mii_read (dev, phy_addr, PCS_ANAR) & mii_read (dev, phy_addr, PCS_ANLPAR); np->speed = 1000; if (negotiate & PCS_ANAR_FULL_DUPLEX) { printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n"); np->full_duplex = 1; } else { printk (KERN_INFO "Auto 1000 Mbps, half duplex\n"); np->full_duplex = 0; } if (negotiate & PCS_ANAR_PAUSE) { np->tx_flow &= 1; np->rx_flow &= 1; } else if (negotiate & PCS_ANAR_ASYMMETRIC) { np->tx_flow = 0; np->rx_flow &= 1; } /* else tx_flow, 
rx_flow = user select */ } else { __u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR); printk (KERN_INFO "Operating at 1000 Mbps, "); if (bmcr & BMCR_FULLDPLX) { printk (KERN_CONT "Full duplex\n"); } else { printk (KERN_CONT "Half duplex\n"); } } if (np->tx_flow) printk(KERN_INFO "Enable Tx Flow Control\n"); else printk(KERN_INFO "Disable Tx Flow Control\n"); if (np->rx_flow) printk(KERN_INFO "Enable Rx Flow Control\n"); else printk(KERN_INFO "Disable Rx Flow Control\n"); return 0; } static int mii_set_media_pcs (struct net_device *dev) { __u16 bmcr; __u16 esr; __u16 anar; int phy_addr; struct netdev_private *np; np = netdev_priv(dev); phy_addr = np->phy_addr; /* Auto-Negotiation? */ if (np->an_enable) { /* Advertise capabilities */ esr = mii_read (dev, phy_addr, PCS_ESR); anar = mii_read (dev, phy_addr, MII_ADVERTISE) & ~PCS_ANAR_HALF_DUPLEX & ~PCS_ANAR_FULL_DUPLEX; if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD)) anar |= PCS_ANAR_HALF_DUPLEX; if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD)) anar |= PCS_ANAR_FULL_DUPLEX; anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC; mii_write (dev, phy_addr, MII_ADVERTISE, anar); /* Soft reset PHY */ mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET); bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET; mii_write (dev, phy_addr, MII_BMCR, bmcr); mdelay(1); } else { /* Force speed setting */ /* PHY Reset */ bmcr = BMCR_RESET; mii_write (dev, phy_addr, MII_BMCR, bmcr); mdelay(10); if (np->full_duplex) { bmcr = BMCR_FULLDPLX; printk (KERN_INFO "Manual full duplex\n"); } else { bmcr = 0; printk (KERN_INFO "Manual half duplex\n"); } mii_write (dev, phy_addr, MII_BMCR, bmcr); mdelay(10); /* Advertise nothing */ mii_write (dev, phy_addr, MII_ADVERTISE, 0); } return 0; } static int rio_close (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); struct pci_dev *pdev = np->pdev; netif_stop_queue (dev); rio_hw_stop(dev); free_irq(pdev->irq, dev); del_timer_sync (&np->timer); free_list(dev); return 0; } static void rio_remove1 (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata (pdev); if (dev) { struct netdev_private *np = netdev_priv(dev); unregister_netdev (dev); dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); #ifdef MEM_MAPPING pci_iounmap(pdev, np->ioaddr); #endif pci_iounmap(pdev, np->eeprom_addr); free_netdev (dev); pci_release_regions (pdev); pci_disable_device (pdev); } } #ifdef CONFIG_PM_SLEEP static int rio_suspend(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct netdev_private *np = netdev_priv(dev); if (!netif_running(dev)) return 0; netif_device_detach(dev); del_timer_sync(&np->timer); rio_hw_stop(dev); return 0; } static int rio_resume(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct netdev_private *np = netdev_priv(dev); if (!netif_running(dev)) return 0; rio_reset_ring(np); rio_hw_init(dev); np->timer.expires = jiffies + 1 * HZ; add_timer(&np->timer); netif_device_attach(dev); dl2k_enable_int(np); return 0; } static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume); #define RIO_PM_OPS (&rio_pm_ops) #else #define RIO_PM_OPS NULL #endif /* CONFIG_PM_SLEEP */ static struct pci_driver rio_driver = { .name = "dl2k", .id_table = rio_pci_tbl, .probe = rio_probe1, .remove = rio_remove1, .driver.pm = RIO_PM_OPS, }; module_pci_driver(rio_driver); /* Read Documentation/networking/device_drivers/ethernet/dlink/dl2k.rst. */
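/*
 * Illustrative sketch, not part of the upstream dl2k driver: a standalone
 * restatement of the hash-index computation used in set_multicast() above.
 * The high six bits of the little-endian Ethernet CRC are bit-reversed to
 * form an index into the 64-bit filter spread across HashTable0/HashTable1.
 * The helper name is hypothetical; it assumes the same headers the driver
 * already relies on (ether_crc_le() from <linux/crc32.h>, ETH_ALEN).
 */
static inline void dl2k_hash_index_example(const u8 *mc_addr, u32 hash_table[2])
{
	int bit, index = 0;
	int crc = ether_crc_le(ETH_ALEN, mc_addr);

	/* Bit-reverse the top six CRC bits into a 0..63 index */
	for (bit = 0; bit < 6; bit++)
		if (crc & (1 << (31 - bit)))
			index |= (1 << bit);

	/* Set the matching bit in HashTable0 (index 0..31) or HashTable1 (32..63) */
	hash_table[index / 32] |= (1 << (index % 32));
}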
linux-master
drivers/net/ethernet/dlink/dl2k.c
// SPDX-License-Identifier: GPL-2.0-or-later /* Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device. * * This is a new flat driver which is based on the original emac_lite * driver from John Williams <[email protected]>. * * Copyright (c) 2007 - 2013 Xilinx, Inc. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/uaccess.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/phy.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #define DRIVER_NAME "xilinx_emaclite" /* Register offsets for the EmacLite Core */ #define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */ #define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */ #define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */ #define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */ #define XEL_MDIOCTRL_OFFSET 0x07F0 /* MDIO Control Register */ #define XEL_GIER_OFFSET 0x07F8 /* GIE Register */ #define XEL_TSR_OFFSET 0x07FC /* Tx status */ #define XEL_TPLR_OFFSET 0x07F4 /* Tx packet length */ #define XEL_RXBUFF_OFFSET 0x1000 /* Receive Buffer */ #define XEL_RPLR_OFFSET 0x100C /* Rx packet length */ #define XEL_RSR_OFFSET 0x17FC /* Rx status */ #define XEL_BUFFER_OFFSET 0x0800 /* Next Tx/Rx buffer's offset */ /* MDIO Address Register Bit Masks */ #define XEL_MDIOADDR_REGADR_MASK 0x0000001F /* Register Address */ #define XEL_MDIOADDR_PHYADR_MASK 0x000003E0 /* PHY Address */ #define XEL_MDIOADDR_PHYADR_SHIFT 5 #define XEL_MDIOADDR_OP_MASK 0x00000400 /* RD/WR Operation */ /* MDIO Write Data Register Bit Masks */ #define XEL_MDIOWR_WRDATA_MASK 0x0000FFFF /* Data to be Written */ /* MDIO Read Data Register Bit Masks */ #define XEL_MDIORD_RDDATA_MASK 0x0000FFFF /* Data to be Read */ /* MDIO Control Register Bit Masks */ #define XEL_MDIOCTRL_MDIOSTS_MASK 0x00000001 /* MDIO Status Mask */ #define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */ /* Global Interrupt Enable Register (GIER) Bit Masks */ #define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */ /* Transmit Status Register (TSR) Bit Masks */ #define XEL_TSR_XMIT_BUSY_MASK 0x00000001 /* Tx complete */ #define XEL_TSR_PROGRAM_MASK 0x00000002 /* Program the MAC address */ #define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */ #define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit * only. This is not documented * in the HW spec */ /* Define for programming the MAC address into the EmacLite */ #define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK) /* Receive Status Register (RSR) */ #define XEL_RSR_RECV_DONE_MASK 0x00000001 /* Rx complete */ #define XEL_RSR_RECV_IE_MASK 0x00000008 /* Rx interrupt enable bit */ /* Transmit Packet Length Register (TPLR) */ #define XEL_TPLR_LENGTH_MASK 0x0000FFFF /* Tx packet length */ /* Receive Packet Length Register (RPLR) */ #define XEL_RPLR_LENGTH_MASK 0x0000FFFF /* Rx packet length */ #define XEL_HEADER_OFFSET 12 /* Offset to length field */ #define XEL_HEADER_SHIFT 16 /* Shift value for length */ /* General Ethernet Definitions */ #define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */ #define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */ #define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. 
*/ #ifdef __BIG_ENDIAN #define xemaclite_readl ioread32be #define xemaclite_writel iowrite32be #else #define xemaclite_readl ioread32 #define xemaclite_writel iowrite32 #endif /** * struct net_local - Our private per device data * @ndev: instance of the network device * @tx_ping_pong: indicates whether Tx Pong buffer is configured in HW * @rx_ping_pong: indicates whether Rx Pong buffer is configured in HW * @next_tx_buf_to_use: next Tx buffer to write to * @next_rx_buf_to_use: next Rx buffer to read from * @base_addr: base address of the Emaclite device * @reset_lock: lock to serialize xmit and tx_timeout execution * @deferred_skb: holds an skb (for transmission at a later time) when the * Tx buffer is not free * @phy_dev: pointer to the PHY device * @phy_node: pointer to the PHY device node * @mii_bus: pointer to the MII bus * @last_link: last link status */ struct net_local { struct net_device *ndev; bool tx_ping_pong; bool rx_ping_pong; u32 next_tx_buf_to_use; u32 next_rx_buf_to_use; void __iomem *base_addr; spinlock_t reset_lock; /* serialize xmit and tx_timeout execution */ struct sk_buff *deferred_skb; struct phy_device *phy_dev; struct device_node *phy_node; struct mii_bus *mii_bus; int last_link; }; /*************************/ /* EmacLite driver calls */ /*************************/ /** * xemaclite_enable_interrupts - Enable the interrupts for the EmacLite device * @drvdata: Pointer to the Emaclite device private data * * This function enables the Tx and Rx interrupts for the Emaclite device along * with the Global Interrupt Enable. */ static void xemaclite_enable_interrupts(struct net_local *drvdata) { u32 reg_data; /* Enable the Tx interrupts for the first Buffer */ reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET); xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK, drvdata->base_addr + XEL_TSR_OFFSET); /* Enable the Rx interrupts for the first buffer */ xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); /* Enable the Global Interrupt Enable */ xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); } /** * xemaclite_disable_interrupts - Disable the interrupts for the EmacLite device * @drvdata: Pointer to the Emaclite device private data * * This function disables the Tx and Rx interrupts for the Emaclite device, * along with the Global Interrupt Enable. */ static void xemaclite_disable_interrupts(struct net_local *drvdata) { u32 reg_data; /* Disable the Global Interrupt Enable */ xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); /* Disable the Tx interrupts for the first buffer */ reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET); xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), drvdata->base_addr + XEL_TSR_OFFSET); /* Disable the Rx interrupts for the first buffer */ reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET); xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), drvdata->base_addr + XEL_RSR_OFFSET); } /** * xemaclite_aligned_write - Write from 16-bit aligned to 32-bit aligned address * @src_ptr: Void pointer to the 16-bit aligned source address * @dest_ptr: Pointer to the 32-bit aligned destination address * @length: Number bytes to write from source to destination * * This function writes data from a 16-bit aligned buffer to a 32-bit aligned * address in the EmacLite device. 
*/ static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr, unsigned int length) { const u16 *from_u16_ptr; u32 align_buffer; u32 *to_u32_ptr; u16 *to_u16_ptr; to_u32_ptr = dest_ptr; from_u16_ptr = src_ptr; align_buffer = 0; for (; length > 3; length -= 4) { to_u16_ptr = (u16 *)&align_buffer; *to_u16_ptr++ = *from_u16_ptr++; *to_u16_ptr++ = *from_u16_ptr++; /* This barrier resolves occasional issues seen around * cases where the data is not properly flushed out * from the processor store buffers to the destination * memory locations. */ wmb(); /* Output a word */ *to_u32_ptr++ = align_buffer; } if (length) { u8 *from_u8_ptr, *to_u8_ptr; /* Set up to output the remaining data */ align_buffer = 0; to_u8_ptr = (u8 *)&align_buffer; from_u8_ptr = (u8 *)from_u16_ptr; /* Output the remaining data */ for (; length > 0; length--) *to_u8_ptr++ = *from_u8_ptr++; /* This barrier resolves occasional issues seen around * cases where the data is not properly flushed out * from the processor store buffers to the destination * memory locations. */ wmb(); *to_u32_ptr = align_buffer; } } /** * xemaclite_aligned_read - Read from 32-bit aligned to 16-bit aligned buffer * @src_ptr: Pointer to the 32-bit aligned source address * @dest_ptr: Pointer to the 16-bit aligned destination address * @length: Number bytes to read from source to destination * * This function reads data from a 32-bit aligned address in the EmacLite device * to a 16-bit aligned buffer. */ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr, unsigned int length) { u16 *to_u16_ptr, *from_u16_ptr; u32 *from_u32_ptr; u32 align_buffer; from_u32_ptr = src_ptr; to_u16_ptr = (u16 *)dest_ptr; for (; length > 3; length -= 4) { /* Copy each word into the temporary buffer */ align_buffer = *from_u32_ptr++; from_u16_ptr = (u16 *)&align_buffer; /* Read data from source */ *to_u16_ptr++ = *from_u16_ptr++; *to_u16_ptr++ = *from_u16_ptr++; } if (length) { u8 *to_u8_ptr, *from_u8_ptr; /* Set up to read the remaining data */ to_u8_ptr = (u8 *)to_u16_ptr; align_buffer = *from_u32_ptr++; from_u8_ptr = (u8 *)&align_buffer; /* Read the remaining data */ for (; length > 0; length--) *to_u8_ptr = *from_u8_ptr; } } /** * xemaclite_send_data - Send an Ethernet frame * @drvdata: Pointer to the Emaclite device private data * @data: Pointer to the data to be sent * @byte_count: Total frame size, including header * * This function checks if the Tx buffer of the Emaclite device is free to send * data. If so, it fills the Tx buffer with data for transmission. Otherwise, it * returns an error. * * Return: 0 upon success or -1 if the buffer(s) are full. * * Note: The maximum Tx packet size can not be more than Ethernet header * (14 Bytes) + Maximum MTU (1500 bytes). This is excluding FCS. 
*/ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, unsigned int byte_count) { u32 reg_data; void __iomem *addr; /* Determine the expected Tx buffer address */ addr = drvdata->base_addr + drvdata->next_tx_buf_to_use; /* If the length is too large, truncate it */ if (byte_count > ETH_FRAME_LEN) byte_count = ETH_FRAME_LEN; /* Check if the expected buffer is available */ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK)) == 0) { /* Switch to next buffer if configured */ if (drvdata->tx_ping_pong != 0) drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET; } else if (drvdata->tx_ping_pong != 0) { /* If the expected buffer is full, try the other buffer, * if it is configured in HW */ addr = (void __iomem __force *)((uintptr_t __force)addr ^ XEL_BUFFER_OFFSET); reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK)) != 0) return -1; /* Buffers were full, return failure */ } else { return -1; /* Buffer was full, return failure */ } /* Write the frame to the buffer */ xemaclite_aligned_write(data, (u32 __force *)addr, byte_count); xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK), addr + XEL_TPLR_OFFSET); /* Update the Tx Status Register to indicate that there is a * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which * is used by the interrupt handler to check whether a frame * has been transmitted */ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK); xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET); return 0; } /** * xemaclite_recv_data - Receive a frame * @drvdata: Pointer to the Emaclite device private data * @data: Address where the data is to be received * @maxlen: Maximum supported ethernet packet length * * This function is intended to be called from the interrupt context or * with a wrapper which waits for the receive frame to be available. * * Return: Total number of bytes received */ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) { void __iomem *addr; u16 length, proto_type; u32 reg_data; /* Determine the expected buffer address */ addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use); /* Verify which buffer has valid data */ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) { if (drvdata->rx_ping_pong != 0) drvdata->next_rx_buf_to_use ^= XEL_BUFFER_OFFSET; } else { /* The instance is out of sync, try other buffer if other * buffer is configured, return 0 otherwise. 
If the instance is * out of sync, do not update the 'next_rx_buf_to_use' since it * will correct on subsequent calls */ if (drvdata->rx_ping_pong != 0) addr = (void __iomem __force *) ((uintptr_t __force)addr ^ XEL_BUFFER_OFFSET); else return 0; /* No data was available */ /* Verify that buffer has valid data */ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); if ((reg_data & XEL_RSR_RECV_DONE_MASK) != XEL_RSR_RECV_DONE_MASK) return 0; /* No data was available */ } /* Get the protocol type of the ethernet frame that arrived */ proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & XEL_RPLR_LENGTH_MASK); /* Check if received ethernet frame is a raw ethernet frame * or an IP packet or an ARP packet */ if (proto_type > ETH_DATA_LEN) { if (proto_type == ETH_P_IP) { length = ((ntohl(xemaclite_readl(addr + XEL_HEADER_IP_LENGTH_OFFSET + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & XEL_RPLR_LENGTH_MASK); length = min_t(u16, length, ETH_DATA_LEN); length += ETH_HLEN + ETH_FCS_LEN; } else if (proto_type == ETH_P_ARP) { length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN; } else { /* Field contains type other than IP or ARP, use max * frame size and let user parse it */ length = ETH_FRAME_LEN + ETH_FCS_LEN; } } else { /* Use the length in the frame, plus the header and trailer */ length = proto_type + ETH_HLEN + ETH_FCS_LEN; } if (WARN_ON(length > maxlen)) length = maxlen; /* Read from the EmacLite device */ xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET), data, length); /* Acknowledge the frame */ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); reg_data &= ~XEL_RSR_RECV_DONE_MASK; xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET); return length; } /** * xemaclite_update_address - Update the MAC address in the device * @drvdata: Pointer to the Emaclite device private data * @address_ptr:Pointer to the MAC address (MAC address is a 48-bit value) * * Tx must be idle and Rx should be idle for deterministic results. * It is recommended that this function should be called after the * initialization and before transmission of any packets from the device. * The MAC address can be programmed using any of the two transmit * buffers (if configured). */ static void xemaclite_update_address(struct net_local *drvdata, const u8 *address_ptr) { void __iomem *addr; u32 reg_data; /* Determine the expected Tx buffer address */ addr = drvdata->base_addr + drvdata->next_tx_buf_to_use; xemaclite_aligned_write(address_ptr, (u32 __force *)addr, ETH_ALEN); xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); /* Update the MAC address in the EmacLite */ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET); /* Wait for EmacLite to finish with the MAC address update */ while ((xemaclite_readl(addr + XEL_TSR_OFFSET) & XEL_TSR_PROG_MAC_ADDR) != 0) ; } /** * xemaclite_set_mac_address - Set the MAC address for this device * @dev: Pointer to the network device instance * @address: Void pointer to the sockaddr structure * * This function copies the HW address from the sockaddr structure to the * net_device structure and updates the address in HW. 
* * Return: Error if the net device is busy or 0 if the addr is set * successfully */ static int xemaclite_set_mac_address(struct net_device *dev, void *address) { struct net_local *lp = netdev_priv(dev); struct sockaddr *addr = address; if (netif_running(dev)) return -EBUSY; eth_hw_addr_set(dev, addr->sa_data); xemaclite_update_address(lp, dev->dev_addr); return 0; } /** * xemaclite_tx_timeout - Callback for Tx Timeout * @dev: Pointer to the network device * @txqueue: Unused * * This function is called when Tx time out occurs for Emaclite device. */ static void xemaclite_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct net_local *lp = netdev_priv(dev); unsigned long flags; dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n", TX_TIMEOUT * 1000UL / HZ); dev->stats.tx_errors++; /* Reset the device */ spin_lock_irqsave(&lp->reset_lock, flags); /* Shouldn't really be necessary, but shouldn't hurt */ netif_stop_queue(dev); xemaclite_disable_interrupts(lp); xemaclite_enable_interrupts(lp); if (lp->deferred_skb) { dev_kfree_skb_irq(lp->deferred_skb); lp->deferred_skb = NULL; dev->stats.tx_errors++; } /* To exclude tx timeout */ netif_trans_update(dev); /* prevent tx timeout */ /* We're all ready to go. Start the queue */ netif_wake_queue(dev); spin_unlock_irqrestore(&lp->reset_lock, flags); } /**********************/ /* Interrupt Handlers */ /**********************/ /** * xemaclite_tx_handler - Interrupt handler for frames sent * @dev: Pointer to the network device * * This function updates the number of packets transmitted and handles the * deferred skb, if there is one. */ static void xemaclite_tx_handler(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); dev->stats.tx_packets++; if (!lp->deferred_skb) return; if (xemaclite_send_data(lp, (u8 *)lp->deferred_skb->data, lp->deferred_skb->len)) return; dev->stats.tx_bytes += lp->deferred_skb->len; dev_consume_skb_irq(lp->deferred_skb); lp->deferred_skb = NULL; netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } /** * xemaclite_rx_handler- Interrupt handler for frames received * @dev: Pointer to the network device * * This function allocates memory for a socket buffer, fills it with data * received and hands it over to the TCP/IP stack. */ static void xemaclite_rx_handler(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); struct sk_buff *skb; u32 len; len = ETH_FRAME_LEN + ETH_FCS_LEN; skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN); if (!skb) { /* Couldn't get memory. */ dev->stats.rx_dropped++; dev_err(&lp->ndev->dev, "Could not allocate receive buffer\n"); return; } skb_reserve(skb, NET_IP_ALIGN); len = xemaclite_recv_data(lp, (u8 *)skb->data, len); if (!len) { dev->stats.rx_errors++; dev_kfree_skb_irq(skb); return; } skb_put(skb, len); /* Tell the skb how much data we got */ skb->protocol = eth_type_trans(skb, dev); skb_checksum_none_assert(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; if (!skb_defer_rx_timestamp(skb)) netif_rx(skb); /* Send the packet upstream */ } /** * xemaclite_interrupt - Interrupt handler for this driver * @irq: Irq of the Emaclite device * @dev_id: Void pointer to the network device instance used as callback * reference * * Return: IRQ_HANDLED * * This function handles the Tx and Rx interrupts of the EmacLite device. 
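 * Both the ping and the pong buffer status registers (XEL_BUFFER_OFFSET
 * apart) are checked for received frames and for completed transmissions.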
*/ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) { bool tx_complete = false; struct net_device *dev = dev_id; struct net_local *lp = netdev_priv(dev); void __iomem *base_addr = lp->base_addr; u32 tx_status; /* Check if there is Rx Data available */ if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK) || (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK)) xemaclite_rx_handler(dev); /* Check if the Transmission for the first buffer is completed */ tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET); if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET); tx_complete = true; } /* Check if the Transmission for the second buffer is completed */ tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); tx_complete = true; } /* If there was a Tx interrupt, call the Tx Handler */ if (tx_complete != 0) xemaclite_tx_handler(dev); return IRQ_HANDLED; } /**********************/ /* MDIO Bus functions */ /**********************/ /** * xemaclite_mdio_wait - Wait for the MDIO to be ready to use * @lp: Pointer to the Emaclite device private data * * This function waits till the device is ready to accept a new MDIO * request. * * Return: 0 for success or ETIMEDOUT for a timeout */ static int xemaclite_mdio_wait(struct net_local *lp) { u32 val; /* wait for the MDIO interface to not be busy or timeout * after some time. */ return readx_poll_timeout(xemaclite_readl, lp->base_addr + XEL_MDIOCTRL_OFFSET, val, !(val & XEL_MDIOCTRL_MDIOSTS_MASK), 1000, 20000); } /** * xemaclite_mdio_read - Read from a given MII management register * @bus: the mii_bus struct * @phy_id: the phy address * @reg: register number to read from * * This function waits till the device is ready to accept a new MDIO * request and then writes the phy address to the MDIO Address register * and reads data from MDIO Read Data register, when its available. * * Return: Value read from the MII management register */ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) { struct net_local *lp = bus->priv; u32 ctrl_reg; u32 rc; if (xemaclite_mdio_wait(lp)) return -ETIMEDOUT; /* Write the PHY address, register number and set the OP bit in the * MDIO Address register. Set the Status bit in the MDIO Control * register to start a MDIO read transaction. */ ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); xemaclite_writel(XEL_MDIOADDR_OP_MASK | ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), lp->base_addr + XEL_MDIOADDR_OFFSET); xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, lp->base_addr + XEL_MDIOCTRL_OFFSET); if (xemaclite_mdio_wait(lp)) return -ETIMEDOUT; rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET); dev_dbg(&lp->ndev->dev, "%s(phy_id=%i, reg=%x) == %x\n", __func__, phy_id, reg, rc); return rc; } /** * xemaclite_mdio_write - Write to a given MII management register * @bus: the mii_bus struct * @phy_id: the phy address * @reg: register number to write to * @val: value to write to the register number specified by reg * * This function waits till the device is ready to accept a new MDIO * request and then writes the val to the MDIO Write Data register. 
* * Return: 0 upon success or a negative error upon failure */ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) { struct net_local *lp = bus->priv; u32 ctrl_reg; dev_dbg(&lp->ndev->dev, "%s(phy_id=%i, reg=%x, val=%x)\n", __func__, phy_id, reg, val); if (xemaclite_mdio_wait(lp)) return -ETIMEDOUT; /* Write the PHY address, register number and clear the OP bit in the * MDIO Address register and then write the value into the MDIO Write * Data register. Finally, set the Status bit in the MDIO Control * register to start a MDIO write transaction. */ ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); xemaclite_writel(~XEL_MDIOADDR_OP_MASK & ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), lp->base_addr + XEL_MDIOADDR_OFFSET); xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET); xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, lp->base_addr + XEL_MDIOCTRL_OFFSET); return 0; } /** * xemaclite_mdio_setup - Register mii_bus for the Emaclite device * @lp: Pointer to the Emaclite device private data * @dev: Pointer to OF device structure * * This function enables MDIO bus in the Emaclite device and registers a * mii_bus. * * Return: 0 upon success or a negative error upon failure */ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) { struct mii_bus *bus; struct resource res; struct device_node *np = of_get_parent(lp->phy_node); struct device_node *npp; int rc, ret; /* Don't register the MDIO bus if the phy_node or its parent node * can't be found. */ if (!np) { dev_err(dev, "Failed to register mdio bus.\n"); return -ENODEV; } npp = of_get_parent(np); ret = of_address_to_resource(npp, 0, &res); of_node_put(npp); if (ret) { dev_err(dev, "%s resource error!\n", dev->of_node->full_name); of_node_put(np); return ret; } if (lp->ndev->mem_start != res.start) { struct phy_device *phydev; phydev = of_phy_find_device(lp->phy_node); if (!phydev) dev_info(dev, "MDIO of the phy is not registered yet\n"); else put_device(&phydev->mdio.dev); of_node_put(np); return 0; } /* Enable the MDIO bus by asserting the enable bit in MDIO Control * register. */ xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK, lp->base_addr + XEL_MDIOCTRL_OFFSET); bus = mdiobus_alloc(); if (!bus) { dev_err(dev, "Failed to allocate mdiobus\n"); of_node_put(np); return -ENOMEM; } snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx", (unsigned long long)res.start); bus->priv = lp; bus->name = "Xilinx Emaclite MDIO"; bus->read = xemaclite_mdio_read; bus->write = xemaclite_mdio_write; bus->parent = dev; rc = of_mdiobus_register(bus, np); of_node_put(np); if (rc) { dev_err(dev, "Failed to register mdio bus.\n"); goto err_register; } lp->mii_bus = bus; return 0; err_register: mdiobus_free(bus); return rc; } /** * xemaclite_adjust_link - Link state callback for the Emaclite device * @ndev: pointer to net_device struct * * There's nothing in the Emaclite device to be configured when the link * state changes. We just print the status. 
*/ static void xemaclite_adjust_link(struct net_device *ndev) { struct net_local *lp = netdev_priv(ndev); struct phy_device *phy = lp->phy_dev; int link_state; /* hash together the state values to decide if something has changed */ link_state = phy->speed | (phy->duplex << 1) | phy->link; if (lp->last_link != link_state) { lp->last_link = link_state; phy_print_status(phy); } } /** * xemaclite_open - Open the network device * @dev: Pointer to the network device * * This function sets the MAC address, requests an IRQ and enables interrupts * for the Emaclite device and starts the Tx queue. * It also connects to the phy device, if MDIO is included in Emaclite device. * * Return: 0 on success. -ENODEV, if PHY cannot be connected. * Non-zero error value on failure. */ static int xemaclite_open(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); int retval; /* Just to be safe, stop the device first */ xemaclite_disable_interrupts(lp); if (lp->phy_node) { lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node, xemaclite_adjust_link, 0, PHY_INTERFACE_MODE_MII); if (!lp->phy_dev) { dev_err(&lp->ndev->dev, "of_phy_connect() failed\n"); return -ENODEV; } /* EmacLite doesn't support giga-bit speeds */ phy_set_max_speed(lp->phy_dev, SPEED_100); phy_start(lp->phy_dev); } /* Set the MAC address each time opened */ xemaclite_update_address(lp, dev->dev_addr); /* Grab the IRQ */ retval = request_irq(dev->irq, xemaclite_interrupt, 0, dev->name, dev); if (retval) { dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n", dev->irq); if (lp->phy_dev) phy_disconnect(lp->phy_dev); lp->phy_dev = NULL; return retval; } /* Enable Interrupts */ xemaclite_enable_interrupts(lp); /* We're ready to go */ netif_start_queue(dev); return 0; } /** * xemaclite_close - Close the network device * @dev: Pointer to the network device * * This function stops the Tx queue, disables interrupts and frees the IRQ for * the Emaclite device. * It also disconnects the phy device associated with the Emaclite device. * * Return: 0, always. */ static int xemaclite_close(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); netif_stop_queue(dev); xemaclite_disable_interrupts(lp); free_irq(dev->irq, dev); if (lp->phy_dev) phy_disconnect(lp->phy_dev); lp->phy_dev = NULL; return 0; } /** * xemaclite_send - Transmit a frame * @orig_skb: Pointer to the socket buffer to be transmitted * @dev: Pointer to the network device * * This function checks if the Tx buffer of the Emaclite device is free to send * data. If so, it fills the Tx buffer with data from socket buffer data, * updates the stats and frees the socket buffer. The Tx completion is signaled * by an interrupt. If the Tx buffer isn't free, then the socket buffer is * deferred and the Tx queue is stopped so that the deferred socket buffer can * be transmitted when the Emaclite device is free to transmit data. * * Return: NETDEV_TX_OK, always. */ static netdev_tx_t xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) { struct net_local *lp = netdev_priv(dev); struct sk_buff *new_skb; unsigned int len; unsigned long flags; len = orig_skb->len; new_skb = orig_skb; spin_lock_irqsave(&lp->reset_lock, flags); if (xemaclite_send_data(lp, (u8 *)new_skb->data, len) != 0) { /* If the Emaclite Tx buffer is busy, stop the Tx queue and * defer the skb for transmission during the ISR, after the * current transmission is complete */ netif_stop_queue(dev); lp->deferred_skb = new_skb; /* Take the time stamp now, since we can't do this in an ISR. 
*/ skb_tx_timestamp(new_skb); spin_unlock_irqrestore(&lp->reset_lock, flags); return NETDEV_TX_OK; } spin_unlock_irqrestore(&lp->reset_lock, flags); skb_tx_timestamp(new_skb); dev->stats.tx_bytes += len; dev_consume_skb_any(new_skb); return NETDEV_TX_OK; } /** * get_bool - Get a parameter from the OF device * @ofdev: Pointer to OF device structure * @s: Property to be retrieved * * This function looks for a property in the device node and returns the value * of the property if its found or 0 if the property is not found. * * Return: Value of the parameter if the parameter is found, or 0 otherwise */ static bool get_bool(struct platform_device *ofdev, const char *s) { u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL); if (!p) { dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to false\n", s); return false; } return (bool)*p; } /** * xemaclite_ethtools_get_drvinfo - Get various Axi Emac Lite driver info * @ndev: Pointer to net_device structure * @ed: Pointer to ethtool_drvinfo structure * * This implements ethtool command for getting the driver information. * Issue "ethtool -i ethX" under linux prompt to execute this function. */ static void xemaclite_ethtools_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed) { strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver)); } static const struct ethtool_ops xemaclite_ethtool_ops = { .get_drvinfo = xemaclite_ethtools_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct net_device_ops xemaclite_netdev_ops; /** * xemaclite_of_probe - Probe method for the Emaclite device. * @ofdev: Pointer to OF device structure * * This function probes for the Emaclite device in the device tree. * It initializes the driver data structure and the hardware, sets the MAC * address and registers the network device. * It also registers a mii_bus for the Emaclite device, if MDIO is included * in the device. * * Return: 0, if the driver is bound to the Emaclite device, or * a negative error if there is failure. 
*/ static int xemaclite_of_probe(struct platform_device *ofdev) { struct resource *res; struct net_device *ndev = NULL; struct net_local *lp = NULL; struct device *dev = &ofdev->dev; int rc = 0; dev_info(dev, "Device Tree Probing\n"); /* Create an ethernet device instance */ ndev = alloc_etherdev(sizeof(struct net_local)); if (!ndev) return -ENOMEM; dev_set_drvdata(dev, ndev); SET_NETDEV_DEV(ndev, &ofdev->dev); lp = netdev_priv(ndev); lp->ndev = ndev; /* Get IRQ for the device */ rc = platform_get_irq(ofdev, 0); if (rc < 0) goto error; ndev->irq = rc; res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); lp->base_addr = devm_ioremap_resource(&ofdev->dev, res); if (IS_ERR(lp->base_addr)) { rc = PTR_ERR(lp->base_addr); goto error; } ndev->mem_start = res->start; ndev->mem_end = res->end; spin_lock_init(&lp->reset_lock); lp->next_tx_buf_to_use = 0x0; lp->next_rx_buf_to_use = 0x0; lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong"); lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); rc = of_get_ethdev_address(ofdev->dev.of_node, ndev); if (rc) { dev_warn(dev, "No MAC address found, using random\n"); eth_hw_addr_random(ndev); } /* Clear the Tx CSR's in case this is a restart */ xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET); xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); /* Set the MAC address in the EmacLite device */ xemaclite_update_address(lp, ndev->dev_addr); lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); xemaclite_mdio_setup(lp, &ofdev->dev); dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr); ndev->netdev_ops = &xemaclite_netdev_ops; ndev->ethtool_ops = &xemaclite_ethtool_ops; ndev->flags &= ~IFF_MULTICAST; ndev->watchdog_timeo = TX_TIMEOUT; /* Finally, register the device */ rc = register_netdev(ndev); if (rc) { dev_err(dev, "Cannot register network device, aborting\n"); goto put_node; } dev_info(dev, "Xilinx EmacLite at 0x%08lX mapped to 0x%p, irq=%d\n", (unsigned long __force)ndev->mem_start, lp->base_addr, ndev->irq); return 0; put_node: of_node_put(lp->phy_node); error: free_netdev(ndev); return rc; } /** * xemaclite_of_remove - Unbind the driver from the Emaclite device. * @of_dev: Pointer to OF device structure * * This function is called if a device is physically removed from the system or * if the driver module is being unloaded. It frees any resources allocated to * the device. * * Return: 0, always. 
*/ static int xemaclite_of_remove(struct platform_device *of_dev) { struct net_device *ndev = platform_get_drvdata(of_dev); struct net_local *lp = netdev_priv(ndev); /* Un-register the mii_bus, if configured */ if (lp->mii_bus) { mdiobus_unregister(lp->mii_bus); mdiobus_free(lp->mii_bus); lp->mii_bus = NULL; } unregister_netdev(ndev); of_node_put(lp->phy_node); lp->phy_node = NULL; free_netdev(ndev); return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void xemaclite_poll_controller(struct net_device *ndev) { disable_irq(ndev->irq); xemaclite_interrupt(ndev->irq, ndev); enable_irq(ndev->irq); } #endif /* Ioctl MII Interface */ static int xemaclite_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { if (!dev->phydev || !netif_running(dev)) return -EINVAL; switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return phy_mii_ioctl(dev->phydev, rq, cmd); default: return -EOPNOTSUPP; } } static const struct net_device_ops xemaclite_netdev_ops = { .ndo_open = xemaclite_open, .ndo_stop = xemaclite_close, .ndo_start_xmit = xemaclite_send, .ndo_set_mac_address = xemaclite_set_mac_address, .ndo_tx_timeout = xemaclite_tx_timeout, .ndo_eth_ioctl = xemaclite_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xemaclite_poll_controller, #endif }; /* Match table for OF platform binding */ static const struct of_device_id xemaclite_of_match[] = { { .compatible = "xlnx,opb-ethernetlite-1.01.a", }, { .compatible = "xlnx,opb-ethernetlite-1.01.b", }, { .compatible = "xlnx,xps-ethernetlite-1.00.a", }, { .compatible = "xlnx,xps-ethernetlite-2.00.a", }, { .compatible = "xlnx,xps-ethernetlite-2.01.a", }, { .compatible = "xlnx,xps-ethernetlite-3.00.a", }, { /* end of list */ }, }; MODULE_DEVICE_TABLE(of, xemaclite_of_match); static struct platform_driver xemaclite_of_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = xemaclite_of_match, }, .probe = xemaclite_of_probe, .remove = xemaclite_of_remove, }; module_platform_driver(xemaclite_of_driver); MODULE_AUTHOR("Xilinx, Inc."); MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver"); MODULE_LICENSE("GPL");
linux-master
drivers/net/ethernet/xilinx/xilinx_emaclite.c
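The xemaclite_adjust_link() callback above folds speed, duplex and link into a single integer so that one comparison against the cached value detects any change. The following standalone sketch is an editor's illustration only, not part of the driver; struct fake_phy merely stands in for the few struct phy_device fields the driver reads.

/* Illustrative sketch: mirrors the link_state packing in xemaclite_adjust_link(). */
#include <stdio.h>

struct fake_phy {
	int speed;	/* e.g. 10 or 100 (SPEED_10 / SPEED_100) */
	int duplex;	/* 0 = half, 1 = full */
	int link;	/* 0 = down, 1 = up */
};

static int pack_link_state(const struct fake_phy *phy)
{
	/* Same expression as the driver: any change in speed, duplex or
	 * link changes the packed value, so a single compare against the
	 * cached value decides whether to print the new status.
	 */
	return phy->speed | (phy->duplex << 1) | phy->link;
}

int main(void)
{
	struct fake_phy a = { .speed = 100, .duplex = 1, .link = 1 };
	struct fake_phy b = { .speed = 100, .duplex = 0, .link = 1 };

	printf("state a = %d, state b = %d, changed = %d\n",
	       pack_link_state(&a), pack_link_state(&b),
	       pack_link_state(&a) != pack_link_state(&b));
	return 0;
}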
// SPDX-License-Identifier: GPL-2.0 /* * MDIO bus driver for the Xilinx TEMAC device * * Copyright (c) 2009 Secret Lab Technologies, Ltd. */ #include <linux/io.h> #include <linux/netdevice.h> #include <linux/mutex.h> #include <linux/phy.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/of_mdio.h> #include <linux/platform_data/xilinx-ll-temac.h> #include "ll_temac.h" /* --------------------------------------------------------------------- * MDIO Bus functions */ static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg) { struct temac_local *lp = bus->priv; u32 rc; unsigned long flags; /* Write the PHY address to the MIIM Access Initiator register. * When the transfer completes, the PHY register value will appear * in the LSW0 register */ spin_lock_irqsave(lp->indirect_lock, flags); temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg); rc = temac_indirect_in32_locked(lp, XTE_MIIMAI_OFFSET); spin_unlock_irqrestore(lp->indirect_lock, flags); dev_dbg(lp->dev, "temac_mdio_read(phy_id=%i, reg=%x) == %x\n", phy_id, reg, rc); return rc; } static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) { struct temac_local *lp = bus->priv; unsigned long flags; dev_dbg(lp->dev, "temac_mdio_write(phy_id=%i, reg=%x, val=%x)\n", phy_id, reg, val); /* First write the desired value into the write data register * and then write the address into the access initiator register */ spin_lock_irqsave(lp->indirect_lock, flags); temac_indirect_out32_locked(lp, XTE_MGTDR_OFFSET, val); temac_indirect_out32_locked(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg); spin_unlock_irqrestore(lp->indirect_lock, flags); return 0; } int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev) { struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev); struct device_node *np = dev_of_node(&pdev->dev); struct mii_bus *bus; u32 bus_hz; int clk_div; int rc; struct resource res; /* Get MDIO bus frequency (if specified) */ bus_hz = 0; if (np) of_property_read_u32(np, "clock-frequency", &bus_hz); else if (pdata) bus_hz = pdata->mdio_clk_freq; /* Calculate a reasonable divisor for the clock rate */ clk_div = 0x3f; /* worst-case default setting */ if (bus_hz != 0) { clk_div = bus_hz / (2500 * 1000 * 2) - 1; if (clk_div < 1) clk_div = 1; if (clk_div > 0x3f) clk_div = 0x3f; } /* Enable the MDIO bus by asserting the enable bit and writing * in the clock config */ temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div); bus = devm_mdiobus_alloc(&pdev->dev); if (!bus) return -ENOMEM; if (np) { of_address_to_resource(np, 0, &res); snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx", (unsigned long long)res.start); } else if (pdata) { snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx", pdata->mdio_bus_id); } bus->priv = lp; bus->name = "Xilinx TEMAC MDIO"; bus->read = temac_mdio_read; bus->write = temac_mdio_write; bus->parent = lp->dev; lp->mii_bus = bus; rc = of_mdiobus_register(bus, np); if (rc) return rc; dev_dbg(lp->dev, "MDIO bus registered; MC:%x\n", temac_indirect_in32(lp, XTE_MC_OFFSET)); return 0; } void temac_mdio_teardown(struct temac_local *lp) { mdiobus_unregister(lp->mii_bus); }
linux-master
drivers/net/ethernet/xilinx/ll_temac_mdio.c
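temac_mdio_setup() above derives the MDC divisor from the optional "clock-frequency" property, targeting a 2.5 MHz MDIO clock and clamping the result to the 6-bit range, with 0x3f as the worst-case default. A minimal standalone sketch of that calculation follows; it assumes nothing beyond the arithmetic shown in the driver and is not kernel code.

/* Illustrative sketch: the clk_div calculation from temac_mdio_setup(). */
#include <stdio.h>

static int temac_mdio_clk_div(unsigned int bus_hz)
{
	int clk_div = 0x3f;	/* worst-case default when bus_hz is unknown */

	if (bus_hz != 0) {
		/* Target MDC of 2.5 MHz: fMDC = bus_hz / ((clk_div + 1) * 2) */
		clk_div = bus_hz / (2500 * 1000 * 2) - 1;
		if (clk_div < 1)
			clk_div = 1;
		if (clk_div > 0x3f)
			clk_div = 0x3f;
	}
	return clk_div;
}

int main(void)
{
	/* 100 MHz bus -> 100e6 / 5e6 - 1 = 19; unknown bus keeps the 0x3f default */
	printf("100 MHz -> %d, unknown -> %d\n",
	       temac_mdio_clk_div(100000000), temac_mdio_clk_div(0));
	return 0;
}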
// SPDX-License-Identifier: GPL-2.0 /* * MDIO bus driver for the Xilinx Axi Ethernet device * * Copyright (c) 2009 Secret Lab Technologies, Ltd. * Copyright (c) 2010 - 2011 Michal Simek <[email protected]> * Copyright (c) 2010 - 2011 PetaLogix * Copyright (c) 2019 SED Systems, a division of Calian Ltd. * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. */ #include <linux/clk.h> #include <linux/of_address.h> #include <linux/of_mdio.h> #include <linux/jiffies.h> #include <linux/iopoll.h> #include "xilinx_axienet.h" #define DEFAULT_MDIO_FREQ 2500000 /* 2.5 MHz */ #define DEFAULT_HOST_CLOCK 150000000 /* 150 MHz */ /* Wait till MDIO interface is ready to accept a new transaction.*/ static int axienet_mdio_wait_until_ready(struct axienet_local *lp) { u32 val; return readx_poll_timeout(axinet_ior_read_mcr, lp, val, val & XAE_MDIO_MCR_READY_MASK, 1, 20000); } /* Enable the MDIO MDC. Called prior to a read/write operation */ static void axienet_mdio_mdc_enable(struct axienet_local *lp) { axienet_iow(lp, XAE_MDIO_MC_OFFSET, ((u32)lp->mii_clk_div | XAE_MDIO_MC_MDIOEN_MASK)); } /* Disable the MDIO MDC. Called after a read/write operation*/ static void axienet_mdio_mdc_disable(struct axienet_local *lp) { u32 mc_reg; mc_reg = axienet_ior(lp, XAE_MDIO_MC_OFFSET); axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mc_reg & ~XAE_MDIO_MC_MDIOEN_MASK)); } /** * axienet_mdio_read - MDIO interface read function * @bus: Pointer to mii bus structure * @phy_id: Address of the PHY device * @reg: PHY register to read * * Return: The register contents on success, -ETIMEDOUT on a timeout * * Reads the contents of the requested register from the requested PHY * address by first writing the details into MCR register. After a while * the register MRD is read to obtain the PHY register content. */ static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg) { u32 rc; int ret; struct axienet_local *lp = bus->priv; axienet_mdio_mdc_enable(lp); ret = axienet_mdio_wait_until_ready(lp); if (ret < 0) { axienet_mdio_mdc_disable(lp); return ret; } axienet_iow(lp, XAE_MDIO_MCR_OFFSET, (((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) & XAE_MDIO_MCR_PHYAD_MASK) | ((reg << XAE_MDIO_MCR_REGAD_SHIFT) & XAE_MDIO_MCR_REGAD_MASK) | XAE_MDIO_MCR_INITIATE_MASK | XAE_MDIO_MCR_OP_READ_MASK)); ret = axienet_mdio_wait_until_ready(lp); if (ret < 0) { axienet_mdio_mdc_disable(lp); return ret; } rc = axienet_ior(lp, XAE_MDIO_MRD_OFFSET) & 0x0000FFFF; dev_dbg(lp->dev, "axienet_mdio_read(phy_id=%i, reg=%x) == %x\n", phy_id, reg, rc); axienet_mdio_mdc_disable(lp); return rc; } /** * axienet_mdio_write - MDIO interface write function * @bus: Pointer to mii bus structure * @phy_id: Address of the PHY device * @reg: PHY register to write to * @val: Value to be written into the register * * Return: 0 on success, -ETIMEDOUT on a timeout * * Writes the value to the requested register by first writing the value * into MWD register. The MCR register is then appropriately setup * to finish the write operation. 
*/ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) { int ret; struct axienet_local *lp = bus->priv; dev_dbg(lp->dev, "axienet_mdio_write(phy_id=%i, reg=%x, val=%x)\n", phy_id, reg, val); axienet_mdio_mdc_enable(lp); ret = axienet_mdio_wait_until_ready(lp); if (ret < 0) { axienet_mdio_mdc_disable(lp); return ret; } axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32)val); axienet_iow(lp, XAE_MDIO_MCR_OFFSET, (((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) & XAE_MDIO_MCR_PHYAD_MASK) | ((reg << XAE_MDIO_MCR_REGAD_SHIFT) & XAE_MDIO_MCR_REGAD_MASK) | XAE_MDIO_MCR_INITIATE_MASK | XAE_MDIO_MCR_OP_WRITE_MASK)); ret = axienet_mdio_wait_until_ready(lp); if (ret < 0) { axienet_mdio_mdc_disable(lp); return ret; } axienet_mdio_mdc_disable(lp); return 0; } /** * axienet_mdio_enable - MDIO hardware setup function * @lp: Pointer to axienet local data structure. * @np: Pointer to mdio device tree node. * * Return: 0 on success, -ETIMEDOUT on a timeout, -EOVERFLOW on a clock * divisor overflow. * * Sets up the MDIO interface by initializing the MDIO clock and enabling the * MDIO interface in hardware. **/ static int axienet_mdio_enable(struct axienet_local *lp, struct device_node *np) { u32 mdio_freq = DEFAULT_MDIO_FREQ; u32 host_clock; u32 clk_div; int ret; lp->mii_clk_div = 0; if (lp->axi_clk) { host_clock = clk_get_rate(lp->axi_clk); } else { struct device_node *np1; /* Legacy fallback: detect CPU clock frequency and use as AXI * bus clock frequency. This only works on certain platforms. */ np1 = of_find_node_by_name(NULL, "cpu"); if (!np1) { netdev_warn(lp->ndev, "Could not find CPU device node.\n"); host_clock = DEFAULT_HOST_CLOCK; } else { int ret = of_property_read_u32(np1, "clock-frequency", &host_clock); if (ret) { netdev_warn(lp->ndev, "CPU clock-frequency property not found.\n"); host_clock = DEFAULT_HOST_CLOCK; } of_node_put(np1); } netdev_info(lp->ndev, "Setting assumed host clock to %u\n", host_clock); } if (np) of_property_read_u32(np, "clock-frequency", &mdio_freq); if (mdio_freq != DEFAULT_MDIO_FREQ) netdev_info(lp->ndev, "Setting non-standard mdio bus frequency to %u Hz\n", mdio_freq); /* clk_div can be calculated by deriving it from the equation: * fMDIO = fHOST / ((1 + clk_div) * 2) * * Where fMDIO <= 2500000, so we get: * fHOST / ((1 + clk_div) * 2) <= 2500000 * * Then we get: * 1 / ((1 + clk_div) * 2) <= (2500000 / fHOST) * * Then we get: * 1 / (1 + clk_div) <= ((2500000 * 2) / fHOST) * * Then we get: * 1 / (1 + clk_div) <= (5000000 / fHOST) * * So: * (1 + clk_div) >= (fHOST / 5000000) * * And finally: * clk_div >= (fHOST / 5000000) - 1 * * fHOST can be read from the flattened device tree as property * "clock-frequency" from the CPU */ clk_div = (host_clock / (mdio_freq * 2)) - 1; /* If there is any remainder from the division of * fHOST / (mdio_freq * 2), then we need to add * 1 to the clock divisor or we will surely be * above the requested frequency */ if (host_clock % (mdio_freq * 2)) clk_div++; /* Check for overflow of mii_clk_div */ if (clk_div & ~XAE_MDIO_MC_CLOCK_DIVIDE_MAX) { netdev_warn(lp->ndev, "MDIO clock divisor overflow\n"); return -EOVERFLOW; } lp->mii_clk_div = (u8)clk_div; netdev_dbg(lp->ndev, "Setting MDIO clock divisor to %u/%u Hz host clock.\n", lp->mii_clk_div, host_clock); axienet_mdio_mdc_enable(lp); ret = axienet_mdio_wait_until_ready(lp); if (ret) axienet_mdio_mdc_disable(lp); return ret; } /** * axienet_mdio_setup - MDIO setup function * @lp: Pointer to axienet local data structure. 
* * Return: 0 on success, -ETIMEDOUT on a timeout, -EOVERFLOW on a clock * divisor overflow, -ENOMEM when mdiobus_alloc (to allocate * memory for mii bus structure) fails. * * Sets up the MDIO interface by initializing the MDIO clock. * Register the MDIO interface. **/ int axienet_mdio_setup(struct axienet_local *lp) { struct device_node *mdio_node; struct mii_bus *bus; int ret; bus = mdiobus_alloc(); if (!bus) return -ENOMEM; snprintf(bus->id, MII_BUS_ID_SIZE, "axienet-%.8llx", (unsigned long long)lp->regs_start); bus->priv = lp; bus->name = "Xilinx Axi Ethernet MDIO"; bus->read = axienet_mdio_read; bus->write = axienet_mdio_write; bus->parent = lp->dev; lp->mii_bus = bus; mdio_node = of_get_child_by_name(lp->dev->of_node, "mdio"); ret = axienet_mdio_enable(lp, mdio_node); if (ret < 0) goto unregister; ret = of_mdiobus_register(bus, mdio_node); if (ret) goto unregister_mdio_enabled; of_node_put(mdio_node); axienet_mdio_mdc_disable(lp); return 0; unregister_mdio_enabled: axienet_mdio_mdc_disable(lp); unregister: of_node_put(mdio_node); mdiobus_free(bus); lp->mii_bus = NULL; return ret; } /** * axienet_mdio_teardown - MDIO remove function * @lp: Pointer to axienet local data structure. * * Unregisters the MDIO and frees any associate memory for mii bus. */ void axienet_mdio_teardown(struct axienet_local *lp) { mdiobus_unregister(lp->mii_bus); mdiobus_free(lp->mii_bus); lp->mii_bus = NULL; }
linux-master
drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
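The divisor derivation in axienet_mdio_enable() above rounds clk_div up whenever the division is inexact, so fMDIO = fHOST / ((1 + clk_div) * 2) never exceeds the requested frequency. The standalone sketch below reproduces that math; DIVIDE_MAX is an assumed placeholder for the driver's XAE_MDIO_MC_CLOCK_DIVIDE_MAX field width, and the snippet is an editor's illustration rather than driver code.

/* Illustrative sketch: the clk_div derivation from axienet_mdio_enable(). */
#include <stdio.h>

#define DIVIDE_MAX 0x3f		/* placeholder for the hardware divide field */

static int axienet_clk_div(unsigned int host_clock, unsigned int mdio_freq)
{
	unsigned int clk_div = host_clock / (mdio_freq * 2) - 1;

	/* Round up when the division is inexact, otherwise fMDIO would end
	 * up slightly above mdio_freq.
	 */
	if (host_clock % (mdio_freq * 2))
		clk_div++;

	if (clk_div & ~DIVIDE_MAX)
		return -1;	/* divisor does not fit the register field */
	return (int)clk_div;
}

int main(void)
{
	/* 150 MHz host, 2.5 MHz MDC -> 150e6/5e6 - 1 = 29 (exact division) */
	printf("150 MHz -> %d\n", axienet_clk_div(150000000, 2500000));
	/* 156.25 MHz host -> 31 - 1 = 30 after truncation, then +1 = 31 */
	printf("156.25 MHz -> %d\n", axienet_clk_div(156250000, 2500000));
	return 0;
}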
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for Xilinx TEMAC Ethernet device * * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <[email protected]> * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. * * This is a driver for the Xilinx ll_temac ipcore which is often used * in the Virtex and Spartan series of chips. * * Notes: * - The ll_temac hardware uses indirect access for many of the TEMAC * registers, include the MDIO bus. However, indirect access to MDIO * registers take considerably more clock cycles than to TEMAC registers. * MDIO accesses are long, so threads doing them should probably sleep * rather than busywait. However, since only one indirect access can be * in progress at any given time, that means that *all* indirect accesses * could end up sleeping (to wait for an MDIO access to complete). * Fortunately none of the indirect accesses are on the 'hot' path for tx * or rx, so this should be okay. * * TODO: * - Factor out locallink DMA code into separate driver * - Fix support for hardware checksumming. * - Testing. Lots and lots of testing. * */ #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/mii.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/netdevice.h> #include <linux/if_ether.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/platform_device.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/tcp.h> /* needed for sizeof(tcphdr) */ #include <linux/udp.h> /* needed for sizeof(udphdr) */ #include <linux/phy.h> #include <linux/in.h> #include <linux/io.h> #include <linux/ip.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/dma-mapping.h> #include <linux/processor.h> #include <linux/platform_data/xilinx-ll-temac.h> #include "ll_temac.h" /* Descriptors defines for Tx and Rx DMA */ #define TX_BD_NUM_DEFAULT 64 #define RX_BD_NUM_DEFAULT 1024 #define TX_BD_NUM_MAX 4096 #define RX_BD_NUM_MAX 4096 /* --------------------------------------------------------------------- * Low level register access functions */ static u32 _temac_ior_be(struct temac_local *lp, int offset) { return ioread32be(lp->regs + offset); } static void _temac_iow_be(struct temac_local *lp, int offset, u32 value) { return iowrite32be(value, lp->regs + offset); } static u32 _temac_ior_le(struct temac_local *lp, int offset) { return ioread32(lp->regs + offset); } static void _temac_iow_le(struct temac_local *lp, int offset, u32 value) { return iowrite32(value, lp->regs + offset); } static bool hard_acs_rdy(struct temac_local *lp) { return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK; } static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout) { ktime_t cur = ktime_get(); return hard_acs_rdy(lp) || ktime_after(cur, timeout); } /* Poll for maximum 20 ms. This is similar to the 2 jiffies @ 100 Hz * that was used before, and should cover MDIO bus speed down to 3200 * Hz. */ #define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC) /* * temac_indirect_busywait - Wait for current indirect register access * to complete. */ int temac_indirect_busywait(struct temac_local *lp) { ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS); spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout)); if (WARN_ON(!hard_acs_rdy(lp))) return -ETIMEDOUT; return 0; } /* * temac_indirect_in32 - Indirect register read access. 
This function * must be called without lp->indirect_lock being held. */ u32 temac_indirect_in32(struct temac_local *lp, int reg) { unsigned long flags; int val; spin_lock_irqsave(lp->indirect_lock, flags); val = temac_indirect_in32_locked(lp, reg); spin_unlock_irqrestore(lp->indirect_lock, flags); return val; } /* * temac_indirect_in32_locked - Indirect register read access. This * function must be called with lp->indirect_lock being held. Use * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid * repeated lock/unlock and to ensure uninterrupted access to indirect * registers. */ u32 temac_indirect_in32_locked(struct temac_local *lp, int reg) { /* This initial wait should normally not spin, as we always * try to wait for indirect access to complete before * releasing the indirect_lock. */ if (WARN_ON(temac_indirect_busywait(lp))) return -ETIMEDOUT; /* Initiate read from indirect register */ temac_iow(lp, XTE_CTL0_OFFSET, reg); /* Wait for indirect register access to complete. We really * should not see timeouts, and could even end up causing * problem for following indirect access, so let's make a bit * of WARN noise. */ if (WARN_ON(temac_indirect_busywait(lp))) return -ETIMEDOUT; /* Value is ready now */ return temac_ior(lp, XTE_LSW0_OFFSET); } /* * temac_indirect_out32 - Indirect register write access. This function * must be called without lp->indirect_lock being held. */ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value) { unsigned long flags; spin_lock_irqsave(lp->indirect_lock, flags); temac_indirect_out32_locked(lp, reg, value); spin_unlock_irqrestore(lp->indirect_lock, flags); } /* * temac_indirect_out32_locked - Indirect register write access. This * function must be called with lp->indirect_lock being held. Use * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid * repeated lock/unlock and to ensure uninterrupted access to indirect * registers. */ void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value) { /* As in temac_indirect_in32_locked(), we should normally not * spin here. And if it happens, we actually end up silently * ignoring the write request. Ouch. */ if (WARN_ON(temac_indirect_busywait(lp))) return; /* Initiate write to indirect register */ temac_iow(lp, XTE_LSW0_OFFSET, value); temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg); /* As in temac_indirect_in32_locked(), we should not see timeouts * here. And if it happens, we continue before the write has * completed. Not good. */ WARN_ON(temac_indirect_busywait(lp)); } /* * temac_dma_in32_* - Memory mapped DMA read, these function expects a * register input that is based on DCR word addresses which are then * converted to memory mapped byte addresses. To be assigned to * lp->dma_in32. */ static u32 temac_dma_in32_be(struct temac_local *lp, int reg) { return ioread32be(lp->sdma_regs + (reg << 2)); } static u32 temac_dma_in32_le(struct temac_local *lp, int reg) { return ioread32(lp->sdma_regs + (reg << 2)); } /* * temac_dma_out32_* - Memory mapped DMA read, these function expects * a register input that is based on DCR word addresses which are then * converted to memory mapped byte addresses. To be assigned to * lp->dma_out32. 
*/ static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value) { iowrite32be(value, lp->sdma_regs + (reg << 2)); } static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value) { iowrite32(value, lp->sdma_regs + (reg << 2)); } /* DMA register access functions can be DCR based or memory mapped. * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both * memory mapped. */ #ifdef CONFIG_PPC_DCR /* * temac_dma_dcr_in32 - DCR based DMA read */ static u32 temac_dma_dcr_in(struct temac_local *lp, int reg) { return dcr_read(lp->sdma_dcrs, reg); } /* * temac_dma_dcr_out32 - DCR based DMA write */ static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value) { dcr_write(lp->sdma_dcrs, reg, value); } /* * temac_dcr_setup - If the DMA is DCR based, then setup the address and * I/O functions */ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op, struct device_node *np) { unsigned int dcrs; /* setup the dcr address mapping if it's in the device tree */ dcrs = dcr_resource_start(np, 0); if (dcrs != 0) { lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0)); lp->dma_in = temac_dma_dcr_in; lp->dma_out = temac_dma_dcr_out; dev_dbg(&op->dev, "DCR base: %x\n", dcrs); return 0; } /* no DCR in the device tree, indicate a failure */ return -1; } #else /* * temac_dcr_setup - This is a stub for when DCR is not supported, * such as with MicroBlaze and x86 */ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op, struct device_node *np) { return -1; } #endif /* * temac_dma_bd_release - Release buffer descriptor rings */ static void temac_dma_bd_release(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); int i; /* Reset Local Link (DMA) */ lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST); for (i = 0; i < lp->rx_bd_num; i++) { if (!lp->rx_skb[i]) break; dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys, XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); dev_kfree_skb(lp->rx_skb[i]); } if (lp->rx_bd_v) dma_free_coherent(ndev->dev.parent, sizeof(*lp->rx_bd_v) * lp->rx_bd_num, lp->rx_bd_v, lp->rx_bd_p); if (lp->tx_bd_v) dma_free_coherent(ndev->dev.parent, sizeof(*lp->tx_bd_v) * lp->tx_bd_num, lp->tx_bd_v, lp->tx_bd_p); } /* * temac_dma_bd_init - Setup buffer descriptor rings */ static int temac_dma_bd_init(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); struct sk_buff *skb; dma_addr_t skb_dma_addr; int i; lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num, sizeof(*lp->rx_skb), GFP_KERNEL); if (!lp->rx_skb) goto out; /* allocate the tx and rx ring buffer descriptors. */ /* returns a virtual address and a physical address. 
*/ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, sizeof(*lp->tx_bd_v) * lp->tx_bd_num, &lp->tx_bd_p, GFP_KERNEL); if (!lp->tx_bd_v) goto out; lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, sizeof(*lp->rx_bd_v) * lp->rx_bd_num, &lp->rx_bd_p, GFP_KERNEL); if (!lp->rx_bd_v) goto out; for (i = 0; i < lp->tx_bd_num; i++) { lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p + sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num)); } for (i = 0; i < lp->rx_bd_num; i++) { lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p + sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num)); skb = __netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE, GFP_KERNEL); if (!skb) goto out; lp->rx_skb[i] = skb; /* returns physical address of skb->data */ skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) goto out; lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr); lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE); lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND); } /* Configure DMA channel (irq setup) */ lp->dma_out(lp, TX_CHNL_CTRL, lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 | 0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used! CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN | CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN); lp->dma_out(lp, RX_CHNL_CTRL, lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 | CHNL_CTRL_IRQ_IOE | CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN | CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN); /* Init descriptor indexes */ lp->tx_bd_ci = 0; lp->tx_bd_tail = 0; lp->rx_bd_ci = 0; lp->rx_bd_tail = lp->rx_bd_num - 1; /* Enable RX DMA transfers */ wmb(); lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p); lp->dma_out(lp, RX_TAILDESC_PTR, lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail)); /* Prepare for TX DMA transfer */ lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); return 0; out: temac_dma_bd_release(ndev); return -ENOMEM; } /* --------------------------------------------------------------------- * net_device_ops */ static void temac_do_set_mac_address(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); unsigned long flags; /* set up unicast MAC address filter set its mac address */ spin_lock_irqsave(lp->indirect_lock, flags); temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET, (ndev->dev_addr[0]) | (ndev->dev_addr[1] << 8) | (ndev->dev_addr[2] << 16) | (ndev->dev_addr[3] << 24)); /* There are reserved bits in EUAW1 * so don't affect them Set MAC bits [47:32] in EUAW1 */ temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET, (ndev->dev_addr[4] & 0x000000ff) | (ndev->dev_addr[5] << 8)); spin_unlock_irqrestore(lp->indirect_lock, flags); } static int temac_init_mac_address(struct net_device *ndev, const void *address) { eth_hw_addr_set(ndev, address); if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); temac_do_set_mac_address(ndev); return 0; } static int temac_set_mac_address(struct net_device *ndev, void *p) { struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(ndev, addr->sa_data); temac_do_set_mac_address(ndev); return 0; } static void temac_set_multicast_list(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); u32 multi_addr_msw, multi_addr_lsw; int i = 0; unsigned long flags; bool promisc_mode_disabled = false; if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) || (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) { temac_indirect_out32(lp, XTE_AFM_OFFSET, 
XTE_AFM_EPPRM_MASK); dev_info(&ndev->dev, "Promiscuous mode enabled.\n"); return; } spin_lock_irqsave(lp->indirect_lock, flags); if (!netdev_mc_empty(ndev)) { struct netdev_hw_addr *ha; netdev_for_each_mc_addr(ha, ndev) { if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM)) break; multi_addr_msw = ((ha->addr[3] << 24) | (ha->addr[2] << 16) | (ha->addr[1] << 8) | (ha->addr[0])); temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, multi_addr_msw); multi_addr_lsw = ((ha->addr[5] << 8) | (ha->addr[4]) | (i << 16)); temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, multi_addr_lsw); i++; } } /* Clear all or remaining/unused address table entries */ while (i < MULTICAST_CAM_TABLE_NUM) { temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0); temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16); i++; } /* Enable address filter block if currently disabled */ if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET) & XTE_AFM_EPPRM_MASK) { temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0); promisc_mode_disabled = true; } spin_unlock_irqrestore(lp->indirect_lock, flags); if (promisc_mode_disabled) dev_info(&ndev->dev, "Promiscuous mode disabled.\n"); } static struct temac_option { int flg; u32 opt; u32 reg; u32 m_or; u32 m_and; } temac_options[] = { /* Turn on jumbo packet support for both Rx and Tx */ { .opt = XTE_OPTION_JUMBO, .reg = XTE_TXC_OFFSET, .m_or = XTE_TXC_TXJMBO_MASK, }, { .opt = XTE_OPTION_JUMBO, .reg = XTE_RXC1_OFFSET, .m_or = XTE_RXC1_RXJMBO_MASK, }, /* Turn on VLAN packet support for both Rx and Tx */ { .opt = XTE_OPTION_VLAN, .reg = XTE_TXC_OFFSET, .m_or = XTE_TXC_TXVLAN_MASK, }, { .opt = XTE_OPTION_VLAN, .reg = XTE_RXC1_OFFSET, .m_or = XTE_RXC1_RXVLAN_MASK, }, /* Turn on FCS stripping on receive packets */ { .opt = XTE_OPTION_FCS_STRIP, .reg = XTE_RXC1_OFFSET, .m_or = XTE_RXC1_RXFCS_MASK, }, /* Turn on FCS insertion on transmit packets */ { .opt = XTE_OPTION_FCS_INSERT, .reg = XTE_TXC_OFFSET, .m_or = XTE_TXC_TXFCS_MASK, }, /* Turn on length/type field checking on receive packets */ { .opt = XTE_OPTION_LENTYPE_ERR, .reg = XTE_RXC1_OFFSET, .m_or = XTE_RXC1_RXLT_MASK, }, /* Turn on flow control */ { .opt = XTE_OPTION_FLOW_CONTROL, .reg = XTE_FCC_OFFSET, .m_or = XTE_FCC_RXFLO_MASK, }, /* Turn on flow control */ { .opt = XTE_OPTION_FLOW_CONTROL, .reg = XTE_FCC_OFFSET, .m_or = XTE_FCC_TXFLO_MASK, }, /* Turn on promiscuous frame filtering (all frames are received ) */ { .opt = XTE_OPTION_PROMISC, .reg = XTE_AFM_OFFSET, .m_or = XTE_AFM_EPPRM_MASK, }, /* Enable transmitter if not already enabled */ { .opt = XTE_OPTION_TXEN, .reg = XTE_TXC_OFFSET, .m_or = XTE_TXC_TXEN_MASK, }, /* Enable receiver? */ { .opt = XTE_OPTION_RXEN, .reg = XTE_RXC1_OFFSET, .m_or = XTE_RXC1_RXEN_MASK, }, {} }; /* * temac_setoptions */ static u32 temac_setoptions(struct net_device *ndev, u32 options) { struct temac_local *lp = netdev_priv(ndev); struct temac_option *tp = &temac_options[0]; int reg; unsigned long flags; spin_lock_irqsave(lp->indirect_lock, flags); while (tp->opt) { reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or; if (options & tp->opt) { reg |= tp->m_or; temac_indirect_out32_locked(lp, tp->reg, reg); } tp++; } spin_unlock_irqrestore(lp->indirect_lock, flags); lp->options |= options; return 0; } /* Initialize temac */ static void temac_device_reset(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); u32 timeout; u32 val; unsigned long flags; /* Perform a software reset */ /* 0x300 host enable bit ? 
*/ /* reset PHY through control register ?:1 */ dev_dbg(&ndev->dev, "%s()\n", __func__); /* Reset the receiver and wait for it to finish reset */ temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK); timeout = 1000; while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) { udelay(1); if (--timeout == 0) { dev_err(&ndev->dev, "%s RX reset timeout!!\n", __func__); break; } } /* Reset the transmitter and wait for it to finish reset */ temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK); timeout = 1000; while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) { udelay(1); if (--timeout == 0) { dev_err(&ndev->dev, "%s TX reset timeout!!\n", __func__); break; } } /* Disable the receiver */ spin_lock_irqsave(lp->indirect_lock, flags); val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET); temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK); spin_unlock_irqrestore(lp->indirect_lock, flags); /* Reset Local Link (DMA) */ lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST); timeout = 1000; while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) { udelay(1); if (--timeout == 0) { dev_err(&ndev->dev, "%s DMA reset timeout!!\n", __func__); break; } } lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE); if (temac_dma_bd_init(ndev)) { dev_err(&ndev->dev, "%s descriptor allocation failed\n", __func__); } spin_lock_irqsave(lp->indirect_lock, flags); temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0); temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0); temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0); temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK); spin_unlock_irqrestore(lp->indirect_lock, flags); /* Sync default options with HW * but leave receiver and transmitter disabled. */ temac_setoptions(ndev, lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN)); temac_do_set_mac_address(ndev); /* Set address filter table */ temac_set_multicast_list(ndev); if (temac_setoptions(ndev, lp->options)) dev_err(&ndev->dev, "Error setting TEMAC options\n"); /* Init Driver variable */ netif_trans_update(ndev); /* prevent tx timeout */ } static void temac_adjust_link(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); struct phy_device *phy = ndev->phydev; u32 mii_speed; int link_state; unsigned long flags; /* hash together the state values to decide if something has changed */ link_state = phy->speed | (phy->duplex << 1) | phy->link; if (lp->last_link != link_state) { spin_lock_irqsave(lp->indirect_lock, flags); mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET); mii_speed &= ~XTE_EMCFG_LINKSPD_MASK; switch (phy->speed) { case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break; case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break; case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break; } /* Write new speed setting out to TEMAC */ temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed); spin_unlock_irqrestore(lp->indirect_lock, flags); lp->last_link = link_state; phy_print_status(phy); } } #ifdef CONFIG_64BIT static void ptr_to_txbd(void *p, struct cdmac_bd *bd) { bd->app3 = (u32)(((u64)p) >> 32); bd->app4 = (u32)((u64)p & 0xFFFFFFFF); } static void *ptr_from_txbd(struct cdmac_bd *bd) { return (void *)(((u64)(bd->app3) << 32) | bd->app4); } #else static void ptr_to_txbd(void *p, struct cdmac_bd *bd) { bd->app4 = (u32)p; } static void *ptr_from_txbd(struct cdmac_bd *bd) { return (void *)(bd->app4); } #endif static void temac_start_xmit_done(struct net_device *ndev) { struct temac_local *lp = 
netdev_priv(ndev); struct cdmac_bd *cur_p; unsigned int stat = 0; struct sk_buff *skb; cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; stat = be32_to_cpu(cur_p->app0); while (stat & STS_CTRL_APP0_CMPLT) { /* Make sure that the other fields are read after bd is * released by dma */ rmb(); dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys), be32_to_cpu(cur_p->len), DMA_TO_DEVICE); skb = (struct sk_buff *)ptr_from_txbd(cur_p); if (skb) dev_consume_skb_irq(skb); cur_p->app1 = 0; cur_p->app2 = 0; cur_p->app3 = 0; cur_p->app4 = 0; ndev->stats.tx_packets++; ndev->stats.tx_bytes += be32_to_cpu(cur_p->len); /* app0 must be visible last, as it is used to flag * availability of the bd */ smp_mb(); cur_p->app0 = 0; lp->tx_bd_ci++; if (lp->tx_bd_ci >= lp->tx_bd_num) lp->tx_bd_ci = 0; cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; stat = be32_to_cpu(cur_p->app0); } /* Matches barrier in temac_start_xmit */ smp_mb(); netif_wake_queue(ndev); } static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag) { struct cdmac_bd *cur_p; int tail; tail = lp->tx_bd_tail; cur_p = &lp->tx_bd_v[tail]; do { if (cur_p->app0) return NETDEV_TX_BUSY; /* Make sure to read next bd app0 after this one */ rmb(); tail++; if (tail >= lp->tx_bd_num) tail = 0; cur_p = &lp->tx_bd_v[tail]; num_frag--; } while (num_frag >= 0); return 0; } static netdev_tx_t temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); struct cdmac_bd *cur_p; dma_addr_t tail_p, skb_dma_addr; int ii; unsigned long num_frag; skb_frag_t *frag; num_frag = skb_shinfo(skb)->nr_frags; frag = &skb_shinfo(skb)->frags[0]; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; if (temac_check_tx_bd_space(lp, num_frag + 1)) { if (netif_queue_stopped(ndev)) return NETDEV_TX_BUSY; netif_stop_queue(ndev); /* Matches barrier in temac_start_xmit_done */ smp_mb(); /* Space might have just been freed - check again */ if (temac_check_tx_bd_space(lp, num_frag + 1)) return NETDEV_TX_BUSY; netif_wake_queue(ndev); } cur_p->app0 = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { unsigned int csum_start_off = skb_checksum_start_offset(skb); unsigned int csum_index_off = csum_start_off + skb->csum_offset; cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */ cur_p->app1 = cpu_to_be32((csum_start_off << 16) | csum_index_off); cur_p->app2 = 0; /* initial checksum seed */ } cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP); skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb_headlen(skb), DMA_TO_DEVICE); cur_p->len = cpu_to_be32(skb_headlen(skb)); if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) { dev_kfree_skb_any(skb); ndev->stats.tx_dropped++; return NETDEV_TX_OK; } cur_p->phys = cpu_to_be32(skb_dma_addr); for (ii = 0; ii < num_frag; ii++) { if (++lp->tx_bd_tail >= lp->tx_bd_num) lp->tx_bd_tail = 0; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; skb_dma_addr = dma_map_single(ndev->dev.parent, skb_frag_address(frag), skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) { if (--lp->tx_bd_tail < 0) lp->tx_bd_tail = lp->tx_bd_num - 1; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; while (--ii >= 0) { --frag; dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys), skb_frag_size(frag), DMA_TO_DEVICE); if (--lp->tx_bd_tail < 0) lp->tx_bd_tail = lp->tx_bd_num - 1; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; } dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys), skb_headlen(skb), DMA_TO_DEVICE); dev_kfree_skb_any(skb); ndev->stats.tx_dropped++; return NETDEV_TX_OK; } cur_p->phys = 
cpu_to_be32(skb_dma_addr); cur_p->len = cpu_to_be32(skb_frag_size(frag)); cur_p->app0 = 0; frag++; } cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP); /* Mark last fragment with skb address, so it can be consumed * in temac_start_xmit_done() */ ptr_to_txbd((void *)skb, cur_p); tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; lp->tx_bd_tail++; if (lp->tx_bd_tail >= lp->tx_bd_num) lp->tx_bd_tail = 0; skb_tx_timestamp(skb); /* Kick off the transfer */ wmb(); lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */ if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) netif_stop_queue(ndev); return NETDEV_TX_OK; } static int ll_temac_recv_buffers_available(struct temac_local *lp) { int available; if (!lp->rx_skb[lp->rx_bd_ci]) return 0; available = 1 + lp->rx_bd_tail - lp->rx_bd_ci; if (available <= 0) available += lp->rx_bd_num; return available; } static void ll_temac_recv(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); unsigned long flags; int rx_bd; bool update_tail = false; spin_lock_irqsave(&lp->rx_lock, flags); /* Process all received buffers, passing them on network * stack. After this, the buffer descriptors will be in an * un-allocated stage, where no skb is allocated for it, and * they are therefore not available for TEMAC/DMA. */ do { struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci]; struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci]; unsigned int bdstat = be32_to_cpu(bd->app0); int length; /* While this should not normally happen, we can end * here when GFP_ATOMIC allocations fail, and we * therefore have un-allocated buffers. */ if (!skb) break; /* Loop over all completed buffer descriptors */ if (!(bdstat & STS_CTRL_APP0_CMPLT)) break; dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys), XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); /* The buffer is not valid for DMA anymore */ bd->phys = 0; bd->len = 0; length = be32_to_cpu(bd->app4) & 0x3FFF; skb_put(skb, length); skb->protocol = eth_type_trans(skb, ndev); skb_checksum_none_assert(skb); /* if we're doing rx csum offload, set it up */ if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) && (skb->protocol == htons(ETH_P_IP)) && (skb->len > 64)) { /* Convert from device endianness (be32) to cpu * endianness, and if necessary swap the bytes * (back) for proper IP checksum byte order * (be16). */ skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF); skb->ip_summed = CHECKSUM_COMPLETE; } if (!skb_defer_rx_timestamp(skb)) netif_rx(skb); /* The skb buffer is now owned by network stack above */ lp->rx_skb[lp->rx_bd_ci] = NULL; ndev->stats.rx_packets++; ndev->stats.rx_bytes += length; rx_bd = lp->rx_bd_ci; if (++lp->rx_bd_ci >= lp->rx_bd_num) lp->rx_bd_ci = 0; } while (rx_bd != lp->rx_bd_tail); /* DMA operations will halt when the last buffer descriptor is * processed (ie. the one pointed to by RX_TAILDESC_PTR). * When that happens, no more interrupt events will be * generated. No IRQ_COAL or IRQ_DLY, and not even an * IRQ_ERR. To avoid stalling, we schedule a delayed work * when there is a potential risk of that happening. The work * will call this function, and thus re-schedule itself until * enough buffers are available again. */ if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx) schedule_delayed_work(&lp->restart_work, HZ / 1000); /* Allocate new buffers for those buffer descriptors that were * passed to network stack. Note that GFP_ATOMIC allocations * can fail (e.g. 
when a larger burst of GFP_ATOMIC * allocations occurs), so while we try to allocate all * buffers in the same interrupt where they were processed, we * continue with what we could get in case of allocation * failure. Allocation of remaining buffers will be retried * in following calls. */ while (1) { struct sk_buff *skb; struct cdmac_bd *bd; dma_addr_t skb_dma_addr; rx_bd = lp->rx_bd_tail + 1; if (rx_bd >= lp->rx_bd_num) rx_bd = 0; bd = &lp->rx_bd_v[rx_bd]; if (bd->phys) break; /* All skb's allocated */ skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE); if (!skb) { dev_warn(&ndev->dev, "skb alloc failed\n"); break; } skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) { dev_kfree_skb_any(skb); break; } bd->phys = cpu_to_be32(skb_dma_addr); bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE); bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND); lp->rx_skb[rx_bd] = skb; lp->rx_bd_tail = rx_bd; update_tail = true; } /* Move tail pointer when buffers have been allocated */ if (update_tail) { lp->dma_out(lp, RX_TAILDESC_PTR, lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail); } spin_unlock_irqrestore(&lp->rx_lock, flags); } /* Function scheduled to ensure a restart in case of DMA halt * condition caused by running out of buffer descriptors. */ static void ll_temac_restart_work_func(struct work_struct *work) { struct temac_local *lp = container_of(work, struct temac_local, restart_work.work); struct net_device *ndev = lp->ndev; ll_temac_recv(ndev); } static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev) { struct net_device *ndev = _ndev; struct temac_local *lp = netdev_priv(ndev); unsigned int status; status = lp->dma_in(lp, TX_IRQ_REG); lp->dma_out(lp, TX_IRQ_REG, status); if (status & (IRQ_COAL | IRQ_DLY)) temac_start_xmit_done(lp->ndev); if (status & (IRQ_ERR | IRQ_DMAERR)) dev_err_ratelimited(&ndev->dev, "TX error 0x%x TX_CHNL_STS=0x%08x\n", status, lp->dma_in(lp, TX_CHNL_STS)); return IRQ_HANDLED; } static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev) { struct net_device *ndev = _ndev; struct temac_local *lp = netdev_priv(ndev); unsigned int status; /* Read and clear the status registers */ status = lp->dma_in(lp, RX_IRQ_REG); lp->dma_out(lp, RX_IRQ_REG, status); if (status & (IRQ_COAL | IRQ_DLY)) ll_temac_recv(lp->ndev); if (status & (IRQ_ERR | IRQ_DMAERR)) dev_err_ratelimited(&ndev->dev, "RX error 0x%x RX_CHNL_STS=0x%08x\n", status, lp->dma_in(lp, RX_CHNL_STS)); return IRQ_HANDLED; } static int temac_open(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); struct phy_device *phydev = NULL; int rc; dev_dbg(&ndev->dev, "temac_open()\n"); if (lp->phy_node) { phydev = of_phy_connect(lp->ndev, lp->phy_node, temac_adjust_link, 0, 0); if (!phydev) { dev_err(lp->dev, "of_phy_connect() failed\n"); return -ENODEV; } phy_start(phydev); } else if (strlen(lp->phy_name) > 0) { phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link, lp->phy_interface); if (IS_ERR(phydev)) { dev_err(lp->dev, "phy_connect() failed\n"); return PTR_ERR(phydev); } phy_start(phydev); } temac_device_reset(ndev); rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev); if (rc) goto err_tx_irq; rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev); if (rc) goto err_rx_irq; return 0; err_rx_irq: free_irq(lp->tx_irq, ndev); err_tx_irq: if (phydev) phy_disconnect(phydev); dev_err(lp->dev, "request_irq() failed\n"); return rc; } static int 
temac_stop(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; dev_dbg(&ndev->dev, "temac_close()\n"); cancel_delayed_work_sync(&lp->restart_work); free_irq(lp->tx_irq, ndev); free_irq(lp->rx_irq, ndev); if (phydev) phy_disconnect(phydev); temac_dma_bd_release(ndev); return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void temac_poll_controller(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); disable_irq(lp->tx_irq); disable_irq(lp->rx_irq); ll_temac_rx_irq(lp->tx_irq, ndev); ll_temac_tx_irq(lp->rx_irq, ndev); enable_irq(lp->tx_irq); enable_irq(lp->rx_irq); } #endif static const struct net_device_ops temac_netdev_ops = { .ndo_open = temac_open, .ndo_stop = temac_stop, .ndo_start_xmit = temac_start_xmit, .ndo_set_rx_mode = temac_set_multicast_list, .ndo_set_mac_address = temac_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_eth_ioctl = phy_do_ioctl_running, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = temac_poll_controller, #endif }; /* --------------------------------------------------------------------- * SYSFS device attributes */ static ssize_t temac_show_llink_regs(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *ndev = dev_get_drvdata(dev); struct temac_local *lp = netdev_priv(ndev); int i, len = 0; for (i = 0; i < 0x11; i++) len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i), (i % 8) == 7 ? "\n" : " "); len += sprintf(buf + len, "\n"); return len; } static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL); static struct attribute *temac_device_attrs[] = { &dev_attr_llink_regs.attr, NULL, }; static const struct attribute_group temac_attr_group = { .attrs = temac_device_attrs, }; /* --------------------------------------------------------------------- * ethtool support */ static void ll_temac_ethtools_get_ringparam(struct net_device *ndev, struct ethtool_ringparam *ering, struct kernel_ethtool_ringparam *kernel_ering, struct netlink_ext_ack *extack) { struct temac_local *lp = netdev_priv(ndev); ering->rx_max_pending = RX_BD_NUM_MAX; ering->rx_mini_max_pending = 0; ering->rx_jumbo_max_pending = 0; ering->tx_max_pending = TX_BD_NUM_MAX; ering->rx_pending = lp->rx_bd_num; ering->rx_mini_pending = 0; ering->rx_jumbo_pending = 0; ering->tx_pending = lp->tx_bd_num; } static int ll_temac_ethtools_set_ringparam(struct net_device *ndev, struct ethtool_ringparam *ering, struct kernel_ethtool_ringparam *kernel_ering, struct netlink_ext_ack *extack) { struct temac_local *lp = netdev_priv(ndev); if (ering->rx_pending > RX_BD_NUM_MAX || ering->rx_mini_pending || ering->rx_jumbo_pending || ering->rx_pending > TX_BD_NUM_MAX) return -EINVAL; if (netif_running(ndev)) return -EBUSY; lp->rx_bd_num = ering->rx_pending; lp->tx_bd_num = ering->tx_pending; return 0; } static int ll_temac_ethtools_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct temac_local *lp = netdev_priv(ndev); ec->rx_max_coalesced_frames = lp->coalesce_count_rx; ec->tx_max_coalesced_frames = lp->coalesce_count_tx; ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100; ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100; return 0; } static int ll_temac_ethtools_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct temac_local *lp = netdev_priv(ndev); if 
(netif_running(ndev)) { netdev_err(ndev, "Please stop netif before applying configuration\n"); return -EFAULT; } if (ec->rx_max_coalesced_frames) lp->coalesce_count_rx = ec->rx_max_coalesced_frames; if (ec->tx_max_coalesced_frames) lp->coalesce_count_tx = ec->tx_max_coalesced_frames; /* With typical LocalLink clock speed of 200 MHz and * C_PRESCALAR=1023, each delay count corresponds to 5.12 us. */ if (ec->rx_coalesce_usecs) lp->coalesce_delay_rx = min(255U, (ec->rx_coalesce_usecs * 100) / 512); if (ec->tx_coalesce_usecs) lp->coalesce_delay_tx = min(255U, (ec->tx_coalesce_usecs * 100) / 512); return 0; } static const struct ethtool_ops temac_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES, .nway_reset = phy_ethtool_nway_reset, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, .get_ringparam = ll_temac_ethtools_get_ringparam, .set_ringparam = ll_temac_ethtools_set_ringparam, .get_coalesce = ll_temac_ethtools_get_coalesce, .set_coalesce = ll_temac_ethtools_set_coalesce, }; static int temac_probe(struct platform_device *pdev) { struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev); struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np; struct temac_local *lp; struct net_device *ndev; u8 addr[ETH_ALEN]; __be32 *p; bool little_endian; int rc = 0; /* Init network device structure */ ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp)); if (!ndev) return -ENOMEM; platform_set_drvdata(pdev, ndev); SET_NETDEV_DEV(ndev, &pdev->dev); ndev->features = NETIF_F_SG; ndev->netdev_ops = &temac_netdev_ops; ndev->ethtool_ops = &temac_ethtool_ops; #if 0 ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */ ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */ ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */ ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */ ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */ ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */ ndev->features |= NETIF_F_GSO; /* Enable software GSO. */ ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */ ndev->features |= NETIF_F_LRO; /* large receive offload */ #endif /* setup temac private info structure */ lp = netdev_priv(ndev); lp->ndev = ndev; lp->dev = &pdev->dev; lp->options = XTE_OPTION_DEFAULTS; lp->rx_bd_num = RX_BD_NUM_DEFAULT; lp->tx_bd_num = TX_BD_NUM_DEFAULT; spin_lock_init(&lp->rx_lock); INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func); /* Setup mutex for synchronization of indirect register access */ if (pdata) { if (!pdata->indirect_lock) { dev_err(&pdev->dev, "indirect_lock missing in platform_data\n"); return -EINVAL; } lp->indirect_lock = pdata->indirect_lock; } else { lp->indirect_lock = devm_kmalloc(&pdev->dev, sizeof(*lp->indirect_lock), GFP_KERNEL); if (!lp->indirect_lock) return -ENOMEM; spin_lock_init(lp->indirect_lock); } /* map device registers */ lp->regs = devm_platform_ioremap_resource_byname(pdev, 0); if (IS_ERR(lp->regs)) { dev_err(&pdev->dev, "could not map TEMAC registers\n"); return -ENOMEM; } /* Select register access functions with the specified * endianness mode. Default for OF devices is big-endian. 
*/ little_endian = false; if (temac_np) little_endian = of_property_read_bool(temac_np, "little-endian"); else if (pdata) little_endian = pdata->reg_little_endian; if (little_endian) { lp->temac_ior = _temac_ior_le; lp->temac_iow = _temac_iow_le; } else { lp->temac_ior = _temac_ior_be; lp->temac_iow = _temac_iow_be; } /* Setup checksum offload, but default to off if not specified */ lp->temac_features = 0; if (temac_np) { p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL); if (p && be32_to_cpu(*p)) lp->temac_features |= TEMAC_FEATURE_TX_CSUM; p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL); if (p && be32_to_cpu(*p)) lp->temac_features |= TEMAC_FEATURE_RX_CSUM; } else if (pdata) { if (pdata->txcsum) lp->temac_features |= TEMAC_FEATURE_TX_CSUM; if (pdata->rxcsum) lp->temac_features |= TEMAC_FEATURE_RX_CSUM; } if (lp->temac_features & TEMAC_FEATURE_TX_CSUM) /* Can checksum TCP/UDP over IPv4. */ ndev->features |= NETIF_F_IP_CSUM; /* Defaults for IRQ delay/coalescing setup. These are * configuration values, so does not belong in device-tree. */ lp->coalesce_delay_tx = 0x10; lp->coalesce_count_tx = 0x22; lp->coalesce_delay_rx = 0xff; lp->coalesce_count_rx = 0x07; /* Setup LocalLink DMA */ if (temac_np) { /* Find the DMA node, map the DMA registers, and * decode the DMA IRQs. */ dma_np = of_parse_phandle(temac_np, "llink-connected", 0); if (!dma_np) { dev_err(&pdev->dev, "could not find DMA node\n"); return -ENODEV; } /* Setup the DMA register accesses, could be DCR or * memory mapped. */ if (temac_dcr_setup(lp, pdev, dma_np)) { /* no DCR in the device tree, try non-DCR */ lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0, NULL); if (IS_ERR(lp->sdma_regs)) { dev_err(&pdev->dev, "unable to map DMA registers\n"); of_node_put(dma_np); return PTR_ERR(lp->sdma_regs); } if (of_property_read_bool(dma_np, "little-endian")) { lp->dma_in = temac_dma_in32_le; lp->dma_out = temac_dma_out32_le; } else { lp->dma_in = temac_dma_in32_be; lp->dma_out = temac_dma_out32_be; } dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs); } /* Get DMA RX and TX interrupts */ lp->rx_irq = irq_of_parse_and_map(dma_np, 0); lp->tx_irq = irq_of_parse_and_map(dma_np, 1); /* Finished with the DMA node; drop the reference */ of_node_put(dma_np); } else if (pdata) { /* 2nd memory resource specifies DMA registers */ lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(lp->sdma_regs)) { dev_err(&pdev->dev, "could not map DMA registers\n"); return PTR_ERR(lp->sdma_regs); } if (pdata->dma_little_endian) { lp->dma_in = temac_dma_in32_le; lp->dma_out = temac_dma_out32_le; } else { lp->dma_in = temac_dma_in32_be; lp->dma_out = temac_dma_out32_be; } /* Get DMA RX and TX interrupts */ lp->rx_irq = platform_get_irq(pdev, 0); lp->tx_irq = platform_get_irq(pdev, 1); /* IRQ delay/coalescing setup */ if (pdata->tx_irq_timeout || pdata->tx_irq_count) { lp->coalesce_delay_tx = pdata->tx_irq_timeout; lp->coalesce_count_tx = pdata->tx_irq_count; } if (pdata->rx_irq_timeout || pdata->rx_irq_count) { lp->coalesce_delay_rx = pdata->rx_irq_timeout; lp->coalesce_count_rx = pdata->rx_irq_count; } } /* Error handle returned DMA RX and TX interrupts */ if (lp->rx_irq <= 0) { rc = lp->rx_irq ?: -EINVAL; return dev_err_probe(&pdev->dev, rc, "could not get DMA RX irq\n"); } if (lp->tx_irq <= 0) { rc = lp->tx_irq ?: -EINVAL; return dev_err_probe(&pdev->dev, rc, "could not get DMA TX irq\n"); } if (temac_np) { /* Retrieve the MAC address */ rc = of_get_mac_address(temac_np, addr); if (rc) { dev_err(&pdev->dev, "could not 
find MAC address\n"); return -ENODEV; } temac_init_mac_address(ndev, addr); } else if (pdata) { temac_init_mac_address(ndev, pdata->mac_addr); } rc = temac_mdio_setup(lp, pdev); if (rc) dev_warn(&pdev->dev, "error registering MDIO bus\n"); if (temac_np) { lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0); if (lp->phy_node) dev_dbg(lp->dev, "using PHY node %pOF\n", temac_np); } else if (pdata) { snprintf(lp->phy_name, sizeof(lp->phy_name), PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr); lp->phy_interface = pdata->phy_interface; } /* Add the device attributes */ rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group); if (rc) { dev_err(lp->dev, "Error creating sysfs files\n"); goto err_sysfs_create; } rc = register_netdev(lp->ndev); if (rc) { dev_err(lp->dev, "register_netdev() error (%i)\n", rc); goto err_register_ndev; } return 0; err_register_ndev: sysfs_remove_group(&lp->dev->kobj, &temac_attr_group); err_sysfs_create: if (lp->phy_node) of_node_put(lp->phy_node); temac_mdio_teardown(lp); return rc; } static int temac_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct temac_local *lp = netdev_priv(ndev); unregister_netdev(ndev); sysfs_remove_group(&lp->dev->kobj, &temac_attr_group); if (lp->phy_node) of_node_put(lp->phy_node); temac_mdio_teardown(lp); return 0; } static const struct of_device_id temac_of_match[] = { { .compatible = "xlnx,xps-ll-temac-1.01.b", }, { .compatible = "xlnx,xps-ll-temac-2.00.a", }, { .compatible = "xlnx,xps-ll-temac-2.02.a", }, { .compatible = "xlnx,xps-ll-temac-2.03.a", }, {}, }; MODULE_DEVICE_TABLE(of, temac_of_match); static struct platform_driver temac_driver = { .probe = temac_probe, .remove = temac_remove, .driver = { .name = "xilinx_temac", .of_match_table = temac_of_match, }, }; module_platform_driver(temac_driver); MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver"); MODULE_AUTHOR("Yoshio Kashiwagi"); MODULE_LICENSE("GPL");
linux-master
drivers/net/ethernet/xilinx/ll_temac_main.c
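ll_temac_ethtools_get_coalesce() and ll_temac_ethtools_set_coalesce() above translate between the 8-bit hardware delay counts and microseconds at roughly 5.12 us per count (200 MHz LocalLink clock, C_PRESCALAR=1023), hence the *512/100 and *100/512 factors. The round-trip sketch below is an editor's illustration of that conversion, not driver code.

/* Illustrative sketch: delay-count <-> microseconds conversion from the
 * ll_temac ethtool coalesce handlers.
 */
#include <stdio.h>

static unsigned int delay_to_usecs(unsigned int count)
{
	return (count * 512) / 100;		/* what get_coalesce reports */
}

static unsigned int usecs_to_delay(unsigned int usecs)
{
	unsigned int count = (usecs * 100) / 512;	/* what set_coalesce stores */

	return count > 255 ? 255 : count;	/* clamp to the 8-bit field */
}

int main(void)
{
	/* The driver's default RX delay of 0xff counts reads back as 1305 us;
	 * requesting 1305 us stores 254 counts because of integer truncation.
	 */
	printf("0xff counts -> %u us, 1305 us -> %u counts\n",
	       delay_to_usecs(0xff), usecs_to_delay(1305));
	return 0;
}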